text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""General utility functions for devappserver2."""
import wsgiref.headers
def get_headers_from_environ(environ):
"""Get a wsgiref.headers.Headers object with headers from the environment.
Headers in environ are prefixed with 'HTTP_', are all uppercase, and have
had dashes replaced with underscores. This strips the HTTP_ prefix and
changes underscores back to dashes before adding them to the returned set
of headers.
Args:
environ: An environ dict for the request as defined in PEP-333.
Returns:
A wsgiref.headers.Headers object that's been filled in with any HTTP
headers found in environ.
"""
headers = wsgiref.headers.Headers([])
for header, value in environ.iteritems():
if header.startswith('HTTP_'):
headers[header[5:].replace('_', '-')] = value
# Content-Type is special; it does not start with 'HTTP_'.
if 'CONTENT_TYPE' in environ:
headers['CONTENT-TYPE'] = environ['CONTENT_TYPE']
return headers
def put_headers_in_environ(headers, environ):
"""Given a list of headers, put them into environ based on PEP-333.
This converts headers to uppercase, prefixes them with 'HTTP_', and
converts dashes to underscores before adding them to the environ dict.
Args:
headers: A list of (header, value) tuples. The HTTP headers to add to the
environment.
environ: An environ dict for the request as defined in PEP-333.
"""
for key, value in headers:
environ['HTTP_%s' % key.upper().replace('-', '_')] = value
|
{
"content_hash": "e5860283436507555217601609a0d8a3",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 78,
"avg_line_length": 32.71739130434783,
"alnum_prop": 0.707641196013289,
"repo_name": "ychen820/microblog",
"id": "115efd30f22ae475df48ed2b89478dfc05c6dd04",
"size": "2106",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "y/google-cloud-sdk/platform/google_appengine/google/appengine/tools/devappserver2/util.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "414229"
},
{
"name": "CSS",
"bytes": "257787"
},
{
"name": "Emacs Lisp",
"bytes": "4733"
},
{
"name": "Groff",
"bytes": "1236200"
},
{
"name": "HTML",
"bytes": "2617468"
},
{
"name": "JavaScript",
"bytes": "1106437"
},
{
"name": "Makefile",
"bytes": "15714"
},
{
"name": "Objective-C",
"bytes": "26302"
},
{
"name": "PHP",
"bytes": "2511443"
},
{
"name": "Perl",
"bytes": "1109010"
},
{
"name": "Python",
"bytes": "71588489"
},
{
"name": "R",
"bytes": "548"
},
{
"name": "Shell",
"bytes": "49796"
},
{
"name": "TeX",
"bytes": "3149"
},
{
"name": "VimL",
"bytes": "5645"
}
],
"symlink_target": ""
}
|
import unittest, StringIO, sys
from libgsync.output import Channel, Debug, Itemize, Progress, Critical
class TestCaseStdStringIO(unittest.TestCase):
def setUp(self):
self.stdout, sys.stdout = sys.stdout, StringIO.StringIO()
self.stderr, sys.stderr = sys.stderr, StringIO.StringIO()
def tearDown(self):
sys.stdout = self.stdout
sys.stderr = self.stderr
class TestChannel(TestCaseStdStringIO):
def test_disabled_by_default(self):
channel = Channel()
self.assertFalse(channel.enabled())
def test_no_output_when_disabled(self):
channel = Channel()
channel.disable()
self.assertFalse(channel.enabled())
channel("Hello World")
self.assertEqual("", sys.stdout.getvalue())
self.assertEqual("", sys.stderr.getvalue())
def test_output_when_enabled(self):
channel = Channel()
channel.enable()
self.assertTrue(channel.enabled())
channel("Hello World")
self.assertEqual("Hello World\n", sys.stdout.getvalue())
self.assertEqual("", sys.stderr.getvalue())
class TestCritical(TestCaseStdStringIO):
def test_call(self):
channel = Critical()
try:
raise Exception("CriticalException")
except Exception, ex:
channel(ex)
import re
pat = re.compile(
r'^gsync: CriticalException\n' \
r'gsync error: Exception at .*\(\d+\) \[client=[\d.]+\]\n$',
re.M | re.S
)
self.assertIsNotNone(pat.search(sys.stderr.getvalue()))
self.assertEqual("", sys.stdout.getvalue())
class TestDebug(TestCaseStdStringIO):
def test_stack(self):
channel = Debug()
channel.enable()
self.assertTrue(channel.enabled())
channel.stack()
import re
pat = re.compile(
r'^DEBUG: BEGIN STACK TRACE\n' \
r'.*\n' \
r'DEBUG: END STACK TRACE\n$',
re.M | re.S
)
self.assertIsNotNone(pat.search(sys.stdout.getvalue()))
self.assertEqual("", sys.stderr.getvalue())
def test_exception_as_object(self):
channel = Debug()
channel.enable()
self.assertTrue(channel.enabled())
import re
pat = re.compile(
r'''^DEBUG: Exception\('Test exception',\): ''',
re.M | re.S
)
try:
raise Exception("Test exception")
except Exception, e:
channel.exception(e)
self.assertIsNotNone(pat.search(sys.stdout.getvalue()))
self.assertEqual("", sys.stderr.getvalue())
def test_exception_as_string(self):
channel = Debug()
channel.enable()
self.assertTrue(channel.enabled())
import re
pat = re.compile(
r'''^DEBUG: 'Test exception': ''',
re.M | re.S
)
try:
raise Exception("Test exception")
except Exception, e:
channel.exception(str(e))
self.assertIsNotNone(pat.search(sys.stdout.getvalue()))
self.assertEqual("", sys.stderr.getvalue())
def test_exception_as_custom_string(self):
channel = Debug()
channel.enable()
self.assertTrue(channel.enabled())
custom_string = "This is a custom string"
import re
pat = re.compile(
r'''^DEBUG: %s: ''' % repr(custom_string),
re.M | re.S
)
try:
raise Exception("Test exception")
except Exception, e:
channel.exception(custom_string)
self.assertIsNotNone(pat.search(sys.stdout.getvalue()))
self.assertEqual("", sys.stderr.getvalue())
def test_exception_as_default(self):
channel = Debug()
channel.enable()
self.assertTrue(channel.enabled())
import re
pat = re.compile(
r'''^DEBUG: 'Exception': ''',
re.M | re.S
)
try:
raise Exception("Test exception")
except Exception, e:
channel.exception()
self.assertIsNotNone(pat.search(sys.stdout.getvalue()))
self.assertEqual("", sys.stderr.getvalue())
class TestItemize(TestCaseStdStringIO):
def test_callable(self):
channel = Itemize()
channel(">+", "/dev/null")
self.assertEqual(sys.stdout.getvalue(), " >+ /dev/null\n")
self.assertEqual("", sys.stderr.getvalue())
sys.stdout.truncate(0)
channel(">+++++++++++++++++", "/dev/null")
self.assertEqual(sys.stdout.getvalue(), ">++++++++++ /dev/null\n")
self.assertEqual("", sys.stderr.getvalue())
class ProgressStatus(object):
def __init__(self, total_size = 0, resumable_progress = 0):
self.total_size = total_size
self.resumable_progress = resumable_progress
def progress(self):
return float(self.resumable_progress) / float(self.total_size)
class TestProgress(TestCaseStdStringIO):
def test_with_disabled_output(self):
channel = Progress(enable_output=False)
self.assertEqual("", sys.stdout.getvalue())
self.assertEqual("", sys.stderr.getvalue())
def test_enabled_output_by_default(self):
channel = Progress()
self.assertNotEqual("", sys.stdout.getvalue())
self.assertEqual("", sys.stderr.getvalue())
def test_with_enabled_output(self):
channel = Progress(enable_output=True)
self.assertNotEqual("", sys.stdout.getvalue())
self.assertEqual("", sys.stderr.getvalue())
def test_status_messages_with_callback(self):
def callback(status):
callback.called = True
callback.called = False
channel = Progress(callback=callback)
self.assertNotEqual("", sys.stdout.getvalue())
self.assertEqual("", sys.stderr.getvalue())
import re
for i in ( 5, 10, 20, 40, 50, 75, 100 ):
pat = re.compile(
r'^\s+%d\s+%d%%\s+\d+\.\d{2}(?:B|KB|MB|GB|TB)/s\s+\d+:\d+:\d+$' % (i, i),
re.S | re.M
)
sys.stdout.truncate(0)
channel(ProgressStatus(100, i))
self.assertIsNotNone(pat.search(sys.stdout.getvalue()))
self.assertTrue(callback.called)
def test_rate_normalization(self):
channel = Progress()
self.assertNotEqual("", sys.stdout.getvalue())
self.assertEqual("", sys.stderr.getvalue())
fileSize = 1000000000
import re
pat = re.compile(
r'^\s+%d\s+%d%%\s+\d+\.\d{2}(?:KB|MB|GB|TB)/s\s+\d+:\d+:\d+$' % (fileSize, 100),
re.S | re.M
)
sys.stdout.truncate(0)
channel(ProgressStatus(fileSize, fileSize / 4))
self.assertIsNone(pat.search(sys.stdout.getvalue()))
sys.stdout.truncate(0)
channel.complete(fileSize)
self.assertIsNotNone(pat.search(sys.stdout.getvalue()))
def test_zero_byte_file(self):
channel = Progress()
self.assertNotEqual("", sys.stdout.getvalue())
self.assertEqual("", sys.stderr.getvalue())
import re
pat = re.compile(
r'^\s+%d\s+%d%%\s+\d+\.\d{2}(?:B|KB|MB|GB|TB)/s\s+\d+:\d+:\d+$' % (0, 100),
re.S | re.M
)
sys.stdout.truncate(0)
channel.complete(0)
self.assertIsNotNone(pat.search(sys.stdout.getvalue()))
def test_complete(self):
channel = Progress()
self.assertNotEqual("", sys.stdout.getvalue())
self.assertEqual("", sys.stderr.getvalue())
import re
pat = re.compile(
r'^\s+%d\s+%d%%\s+\d+\.\d{2}(?:B|KB|MB|GB|TB)/s\s+\d+:\d+:\d+$' % (100, 100),
re.S | re.M
)
sys.stdout.truncate(0)
channel(ProgressStatus(100, 25))
self.assertIsNone(pat.search(sys.stdout.getvalue()))
sys.stdout.truncate(0)
channel.complete(100)
self.assertIsNotNone(pat.search(sys.stdout.getvalue()))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "958776f1d35b80e2e480c3cba0e7676a",
"timestamp": "",
"source": "github",
"line_count": 292,
"max_line_length": 92,
"avg_line_length": 27.756849315068493,
"alnum_prop": 0.5643429981492906,
"repo_name": "iwonbigbro/gsync",
"id": "0d9e1e6b19e320e21278d98f8113dc33991c3cd9",
"size": "8188",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/libgsync/test_output.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2629"
},
{
"name": "Python",
"bytes": "135977"
},
{
"name": "Shell",
"bytes": "8494"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.model.document import Document
from frappe.desk.doctype.notification_settings.notification_settings import (is_notifications_enabled, is_email_notifications_enabled_for_type, set_seen_value)
class NotificationLog(Document):
def after_insert(self):
frappe.publish_realtime('notification', after_commit=True, user=self.for_user)
set_notifications_as_unseen(self.for_user)
if is_email_notifications_enabled_for_type(self.for_user, self.type):
send_notification_email(self)
def get_permission_query_conditions(for_user):
if not for_user:
for_user = frappe.session.user
if for_user == 'Administrator':
return
return '''(`tabNotification Log`.for_user = '{user}')'''.format(user=for_user)
def get_title(doctype, docname, title_field=None):
if not title_field:
title_field = frappe.get_meta(doctype).get_title_field()
title = docname if title_field == "name" else \
frappe.db.get_value(doctype, docname, title_field)
return title
def get_title_html(title):
return '<b class="subject-title">{0}</b>'.format(title)
def enqueue_create_notification(users, doc):
'''
During installation of new site, enqueue_create_notification tries to connect to Redis.
This breaks new site creation if Redis server is not running.
We do not need any notifications in fresh installation
'''
if frappe.flags.in_install:
return
doc = frappe._dict(doc)
if isinstance(users, frappe.string_types):
users = [user.strip() for user in users.split(',') if user.strip()]
users = list(set(users))
frappe.enqueue(
'frappe.desk.doctype.notification_log.notification_log.make_notification_logs',
doc=doc,
users=users,
now=frappe.flags.in_test
)
def make_notification_logs(doc, users):
from frappe.social.doctype.energy_point_settings.energy_point_settings import is_energy_point_enabled
for user in users:
if frappe.db.exists('User', user):
if is_notifications_enabled(user):
if doc.type == 'Energy Point' and not is_energy_point_enabled():
return
_doc = frappe.new_doc('Notification Log')
_doc.update(doc)
_doc.for_user = user
if _doc.for_user != _doc.from_user or doc.type == 'Energy Point' or doc.type == 'Alert':
_doc.insert(ignore_permissions=True)
def send_notification_email(doc):
if doc.type == 'Energy Point' and doc.email_content is None:
return
from frappe.utils import get_url_to_form, strip_html
doc_link = get_url_to_form(doc.document_type, doc.document_name)
header = get_email_header(doc)
email_subject = strip_html(doc.subject)
frappe.sendmail(
recipients = doc.for_user,
subject = email_subject,
template = "new_notification",
args = {
'body_content': doc.subject,
'description': doc.email_content,
'document_type': doc.document_type,
'document_name': doc.document_name,
'doc_link': doc_link
},
header = [header, 'orange'],
now=frappe.flags.in_test
)
def get_email_header(doc):
docname = doc.document_name
header_map = {
'Default': _('New Notification'),
'Mention': _('New Mention on {0}').format(docname),
'Assignment': _('Assignment Update on {0}').format(docname),
'Share': _('New Document Shared {0}').format(docname),
'Energy Point': _('Energy Point Update on {0}').format(docname),
}
return header_map[doc.type or 'Default']
@frappe.whitelist()
def mark_all_as_read():
unread_docs_list = frappe.db.get_all('Notification Log', filters = {'read': 0, 'for_user': frappe.session.user})
unread_docnames = [doc.name for doc in unread_docs_list]
if unread_docnames:
filters = {'name': ['in', unread_docnames]}
frappe.db.set_value('Notification Log', filters, 'read', 1, update_modified=False)
@frappe.whitelist()
def mark_as_read(docname):
if docname:
frappe.db.set_value('Notification Log', docname, 'read', 1, update_modified=False)
@frappe.whitelist()
def trigger_indicator_hide():
frappe.publish_realtime('indicator_hide', user=frappe.session.user)
def set_notifications_as_unseen(user):
try:
frappe.db.set_value('Notification Settings', user, 'seen', 0)
except frappe.DoesNotExistError:
return
|
{
"content_hash": "2589e88ebd7fdfcd1a2a75ef7f97c846",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 159,
"avg_line_length": 31.61832061068702,
"alnum_prop": 0.7170449058425882,
"repo_name": "adityahase/frappe",
"id": "c4c6077e855866db61a586fda31b382c9d7e6633",
"size": "4276",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/desk/doctype/notification_log/notification_log.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "288806"
},
{
"name": "HTML",
"bytes": "209164"
},
{
"name": "JavaScript",
"bytes": "2350450"
},
{
"name": "Less",
"bytes": "160693"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "3035663"
},
{
"name": "SCSS",
"bytes": "45340"
},
{
"name": "Shell",
"bytes": "517"
},
{
"name": "Vue",
"bytes": "73943"
}
],
"symlink_target": ""
}
|
from JumpScale import j
descr = """
Check on average cpu
"""
organization = "jumpscale"
author = "deboeckj@codescalers.com"
license = "bsd"
version = "1.0"
period = 15*60 # always in sec
startatboot = True
order = 1
enable = True
async = True
log = False
queue ='process'
roles = ['master']
def action():
try:
import JumpScale.baselib.watchdog.client
except Exception:
return
import JumpScale.grid.osis
ocl = j.core.osis.getClientByInstance('main')
scl = j.core.osis.getClientForCategory(ocl, 'system', 'stats')
results = scl.search({'target':'smartSummarize(n*.system.cpu.percent, "1hour", "avg")', 'from': '-1h'})
for noderesult in results:
avgcpu, timestamp = noderesult['datapoints'][-1]
target = noderesult['target']
nid = int(target[len('smartSummarize(n'):].split('.')[0])
if avgcpu > 95:
state = 'CRITICAL'
elif avgcpu > 80:
state = 'WARNING'
else:
state = 'OK'
j.tools.watchdog.client.send("cpu.core", state, avgcpu, nid=nid)
|
{
"content_hash": "1a4b3610b466258a5714f36dc98e5fc8",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 107,
"avg_line_length": 26.9,
"alnum_prop": 0.6171003717472119,
"repo_name": "Jumpscale/jumpscale6_core",
"id": "ac456eb4462138315ab3d0779f626a9d8c346023",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/agentcontroller/jumpscripts/extended/alerts/alert_cpu_core.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "3681"
},
{
"name": "HTML",
"bytes": "11738"
},
{
"name": "JavaScript",
"bytes": "70132"
},
{
"name": "Lua",
"bytes": "2162"
},
{
"name": "Python",
"bytes": "5848017"
},
{
"name": "Shell",
"bytes": "7692"
}
],
"symlink_target": ""
}
|
import pandas as pd
import warnings
import weakref
def lazy_property(fn):
'''Decorator that makes a property lazy-evaluated.
'''
attr_name = fn.__name__
@property
def _lazy_property(self):
if attr_name not in self._values.keys():
self._values[attr_name] = fn(self)
return self._values[attr_name]
return _lazy_property
class BaseObject(object):
static_properties = {}
properties = {}
def __init__(self, uid, network):
# the object index
self.uid = uid
# weak reference to the network
self.network = weakref.ref(network)
# cache of values
self._values = {}
# dictionary of calculation results, only gets
# filled during solve() method
self.results = {}
# list of times
self.times = []
# index caching
self._index = None
def get_index(self, uid):
raise NotImplementedError
def set_object_value(self, code, value):
raise NotImplementedError
def get_object_value(self, code):
raise NotImplementedError
def reset(self):
self._values = {}
self.results = {}
self.times = []
def __str__(self):
return "<epynet."+self.__class__.__name__ + " with id '" + self.uid + "'>"
def __getattr__(self, name):
if name in self.static_properties.keys():
return self.get_property(self.static_properties[name])
elif name in self.properties.keys():
if not self.network().solved:
warnings.warn("requesting dynamic properties from an unsolved network")
if self.results == {}:
return self.get_property(self.properties[name])
else:
return pd.Series(self.results[name], index=self.times)
else:
raise AttributeError('Nonexistant Attribute', name)
def __setattr__(self, name, value):
if name in self.properties.keys():
raise AttributeError("Illegal Assignment to Computed Value")
if name in self.static_properties.keys():
self.set_static_property(self.static_properties[name], value)
else:
super(BaseObject, self).__setattr__(name, value)
def set_static_property(self, code, value):
# set network as unsolved
self.network().solved = False
self._values[code] = value
self.set_object_value(code, value)
def get_property(self, code):
if code not in self._values.keys():
self._values[code] = self.get_object_value(code)
return self._values[code]
|
{
"content_hash": "e5360453ce36a22d9c9eb1bdfc99bd93",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 87,
"avg_line_length": 29.344444444444445,
"alnum_prop": 0.5876561908368042,
"repo_name": "Vitens/epynet",
"id": "28c0a40a68c9b1e9e34aa14ccfd1c0ec7452fd45",
"size": "2641",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "epynet/baseobject.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "95004"
}
],
"symlink_target": ""
}
|
from ctypes import *
class onion_amount(Union):
_fields_ = [
("brown_long", c_long),
("brown_int", c_int),
("brown_char", c_char * 8)
]
value = raw_input("Enter the number of onions to union:")
onions = onion_amount(int(value))
print "Onion amount as long: %ld" % onions.brown_long
print "Onion amount as int: %d" % onions.brown_int
print "Onion amount as char: %s" % onions.brown_char
|
{
"content_hash": "c49fdaa0d4130b319fd73031532f916a",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 57,
"avg_line_length": 28.066666666666666,
"alnum_prop": 0.6270783847980997,
"repo_name": "JordanRobinson/books",
"id": "fcef32e83ef59bb6470f112dd8d15e96f7b2a39d",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gray-hat-python/src/chapter1-unions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Clojure",
"bytes": "3402"
},
{
"name": "Python",
"bytes": "17127"
}
],
"symlink_target": ""
}
|
from sqlalchemy import Column
from sqlalchemy.orm import relationship
from . import Base
__all__ = ['Habits']
class Habits(Base):
__tablename__ = 'habits'
name = Column()
habit_groups = relationship('Routines', back_populates='habit')
attempts_logs = relationship('AttemptsLogs', back_populates='habit')
def __init__(self, name):
"""
Habits model.
Parameters
----------
name : str
Name.
Attributes
----------
id : int
Unique identifier.
name : str
Name.
habit_groups : list of mfit.models.Routines
Collection of Routines entities.
attempts_logs : list of mfit.models.AttemptsLogs
Collection of Attempts Logs entities.
created_at : datetime.datetime
When the entity was originally created.
created_by : int
Who originally created the entity.
updated_at : datetime.datetime
When the entity was last updated.
updated_by : int
Who last updated the entity.
"""
self.name = name
def __repr__(self):
repr_ = '{}(name="{}")'
return repr_.format(self.__class__.__name__, self.name)
|
{
"content_hash": "d43807ea4072b20dcb4066d15cd97233",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 72,
"avg_line_length": 23.9811320754717,
"alnum_prop": 0.5546813532651456,
"repo_name": "dnguyen0304/mfit",
"id": "a733e9b448b8b05d60dc829511b5c19eb611c337",
"size": "1296",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "mfit/mfit/models/habits.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PLpgSQL",
"bytes": "6724"
},
{
"name": "Protocol Buffer",
"bytes": "517"
},
{
"name": "Python",
"bytes": "68209"
},
{
"name": "Shell",
"bytes": "696"
}
],
"symlink_target": ""
}
|
from copy import deepcopy
import os
import StringIO
import subprocess
import textwrap
import urllib2
import requests
import six
import yaml
import mock
from mock import patch
from fuel_upgrade import errors
from fuel_upgrade.tests.base import BaseTestCase
from fuel_upgrade import utils
from fuel_upgrade.utils import create_dir_if_not_exists
from fuel_upgrade.utils import exec_cmd
from fuel_upgrade.utils import exec_cmd_iterator
from fuel_upgrade.utils import get_request
from fuel_upgrade.utils import http_retry
from fuel_upgrade.utils import sanitize
from fuel_upgrade.utils import topological_sorting
from fuel_upgrade.utils import wait_for_true
class TestUtils(BaseTestCase):
def make_process_mock(self, return_code=0):
process_mock = mock.Mock()
process_mock.stdout = ['Stdout line 1', 'Stdout line 2']
process_mock.returncode = return_code
return process_mock
def test_exec_cmd_executes_sucessfuly(self):
cmd = 'some command'
process_mock = self.make_process_mock()
with patch.object(
subprocess, 'Popen', return_value=process_mock) as popen_mock:
exec_cmd(cmd)
popen_mock.assert_called_once_with(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
shell=True)
@mock.patch('fuel_upgrade.utils.exec_cmd',
side_effect=errors.ExecutedErrorNonZeroExitCode())
def test_safe_exec_cmd(self, exec_mock):
cmd = 'some command'
utils.safe_exec_cmd(cmd)
exec_mock.assert_called_once_with(cmd)
def test_exec_cmd_raises_error_in_case_of_non_zero_exit_code(self):
cmd = 'some command'
return_code = 1
process_mock = self.make_process_mock(return_code=return_code)
with patch.object(subprocess, 'Popen', return_value=process_mock):
self.assertRaisesRegexp(
errors.ExecutedErrorNonZeroExitCode,
'Shell command executed with "{0}" '
'exit code: {1} '.format(return_code, cmd),
exec_cmd, cmd)
def test_exec_cmd_iterator_executes_sucessfuly(self):
cmd = 'some command'
process_mock = self.make_process_mock()
with patch.object(
subprocess, 'Popen', return_value=process_mock) as popen_mock:
for line in exec_cmd_iterator(cmd):
self.assertTrue(line.startswith('Stdout line '))
popen_mock.assert_called_once_with(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True)
def test_exec_cmd_iterator_raises_error_in_case_of_non_zero_exit_code(
self):
cmd = 'some command'
return_code = 1
process_mock = self.make_process_mock(return_code=return_code)
with patch.object(subprocess, 'Popen', return_value=process_mock):
with self.assertRaisesRegexp(
errors.ExecutedErrorNonZeroExitCode,
'Shell command executed with "{0}" '
'exit code: {1} '.format(return_code, cmd)):
for line in exec_cmd_iterator(cmd):
self.assertTrue(line.startswith('Stdout line '))
def test_get_request(self):
url = 'http://some-url.com/path'
response = mock.MagicMock()
response.read.return_value = '{"key": "value"}'
response.getcode.return_value = 200
with patch.object(
urllib2, 'urlopen', return_value=response) as urlopen:
resp = get_request(url)
self.assertEqual(({'key': 'value'}, 200), resp)
urlopen.assert_called_once_with(url)
def test_topological_sorting(self):
graph = {
'D': ['C', 'G'],
'E': ['A', 'D'],
'A': [],
'B': ['A'],
'C': ['A'],
'G': []
}
order = topological_sorting(graph)
self.assertEqual(order, ['A', 'B', 'C', 'G', 'D', 'E'])
def test_topological_sorting_raises_cycle_dependencies_error(self):
graph = {
'A': ['C', 'D'],
'B': ['A'],
'C': ['B'],
'D': []
}
self.assertRaisesRegexp(
errors.CyclicDependenciesError,
"Cyclic dependencies error ",
topological_sorting,
graph)
@mock.patch('fuel_upgrade.utils.os.makedirs')
def test_create_dir_if_not_exists_does_not_create_dir(self, mock_makedirs):
path = 'some_path'
with mock.patch(
'fuel_upgrade.utils.os.path.isdir',
return_value=True) as mock_isdir:
create_dir_if_not_exists(path)
mock_isdir.assert_called_once_with(path)
self.method_was_not_called(mock_makedirs)
@mock.patch('fuel_upgrade.utils.os.makedirs')
def test_create_dir_if_not_exists(self, mock_makedirs):
path = 'some_path'
with mock.patch(
'fuel_upgrade.utils.os.path.isdir',
return_value=False) as mock_isdir:
create_dir_if_not_exists(path)
mock_isdir.assert_called_once_with(path)
mock_makedirs.assert_called_once_with(path)
def test_wait_for_true_does_not_raise_errors(self):
self.assertEqual(wait_for_true(lambda: True, timeout=0), True)
def test_wait_for_true_raises_timeout_error(self):
self.assertRaisesRegexp(
errors.TimeoutError,
'Failed to execute command with timeout 0',
wait_for_true,
lambda: False,
timeout=0)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=True)
@mock.patch('fuel_upgrade.utils.copy_dir')
def test_copy_with_dir(self, copy_mock, _):
from_path = '/from_path'
to_path = '/to_path'
utils.copy(from_path, to_path)
copy_mock.assert_called_once_with(from_path, to_path, True, True)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=False)
@mock.patch('fuel_upgrade.utils.copy_file')
def test_copy_with_file(self, copy_mock, _):
from_path = '/from_path'
to_path = '/to_path'
utils.copy(from_path, to_path)
copy_mock.assert_called_once_with(from_path, to_path, True)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=False)
@mock.patch('fuel_upgrade.utils.shutil.copy')
def test_copy_file(self, copy_mock, _):
from_path = '/from_path.txt'
to_path = '/to_path.txt'
utils.copy_file(from_path, to_path)
copy_mock.assert_called_once_with(from_path, to_path)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=True)
@mock.patch('fuel_upgrade.utils.shutil.copy')
def test_copy_file_to_dir(self, copy_mock, _):
from_path = '/from_path.txt'
to_path = '/to_path'
utils.copy_file(from_path, to_path)
copy_mock.assert_called_once_with(from_path, '/to_path/from_path.txt')
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=False)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.shutil.copy')
def test_copy_file_do_not_overwrite(self, copy_mock, _, __):
from_path = '/from_path.txt'
to_path = '/to_path.txt'
utils.copy_file(from_path, to_path, overwrite=False)
self.method_was_not_called(copy_mock)
@mock.patch('fuel_upgrade.utils.shutil.copytree')
def test_copy_dir(self, copy_mock):
from_path = '/from_path'
to_path = '/to_path'
utils.copy_dir(from_path, to_path)
copy_mock.assert_called_once_with(from_path, to_path, symlinks=True)
@mock.patch('fuel_upgrade.utils.os.path.lexists', return_value=True)
@mock.patch('fuel_upgrade.utils.shutil.copytree')
@mock.patch('fuel_upgrade.utils.remove')
def test_copy_dir_overwrite(self, rm_mock, copy_mock, _):
from_path = '/from_path'
to_path = '/to_path'
utils.copy_dir(from_path, to_path)
rm_mock.assert_called_once_with(to_path, ignore_errors=True)
copy_mock.assert_called_once_with(from_path, to_path, symlinks=True)
def test_file_contains_lines_returns_true(self):
with mock.patch(
'__builtin__.open',
self.mock_open("line 1\n line2\n line3")):
self.assertTrue(
utils.file_contains_lines('/some/path', ['line 1', 'line3']))
def test_file_contains_lines_returns_false(self):
with mock.patch(
'__builtin__.open',
self.mock_open("line 1\n line2\n line3")):
self.assertFalse(
utils.file_contains_lines('/some/path', ['line 4', 'line3']))
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.os.symlink')
@mock.patch('fuel_upgrade.utils.remove')
def test_symlink(self, remove_mock, symlink_mock, _):
from_path = '/tmp/from/path'
to_path = '/tmp/to/path'
utils.symlink(from_path, to_path)
symlink_mock.assert_called_once_with(from_path, to_path)
remove_mock.assert_called_once_with(to_path)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
@mock.patch('fuel_upgrade.utils.os.symlink')
@mock.patch('fuel_upgrade.utils.remove')
def test_symlink_no_exist(self, remove_mock, symlink_mock, _):
from_path = '/tmp/from/path'
to_path = '/tmp/to/path'
utils.symlink(from_path, to_path)
symlink_mock.assert_called_once_with(from_path, to_path)
self.called_once(remove_mock)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.symlink')
def test_symlink_if_src_exists_ok(self, symlink_mock, _):
from_path = '/tmp/from/path'
to_path = '/tmp/to/path'
utils.symlink_if_src_exists(from_path, to_path)
symlink_mock.assert_called_once_with(from_path, to_path,
overwrite=True)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
@mock.patch('fuel_upgrade.utils.symlink')
def test_symlink_if_src_exists_not_exists(self, symlink_mock, _):
from_path = '/tmp/from/path'
to_path = '/tmp/to/path'
utils.symlink_if_src_exists(from_path, to_path)
self.method_was_not_called(symlink_mock)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.os.remove')
def test_remove_if_exists(self, remove_mock, exists_mock):
path = '/tmp/some/path'
utils.remove_if_exists(path)
remove_mock.assert_called_once_with(path)
exists_mock.assert_called_once_with(path)
def test_load_fixture(self):
fixture = StringIO.StringIO('''
- &base
fields:
a: 1
b: 2
c: 3
- pk: 1
extend: *base
fields:
a: 13
- pk: 2
extend: *base
fields:
d: 42
''')
setattr(fixture, 'name', 'some.yaml')
result = utils.load_fixture(fixture)
self.assertEqual(len(result), 2)
self.assertEqual(result[0], {
'a': 13,
'b': 2,
'c': 3,
})
self.assertEqual(result[1], {
'a': 1,
'b': 2,
'c': 3,
'd': 42,
})
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.shutil.rmtree')
def test_rmtree(self, rm_mock, exists_mock):
path = '/some/file/path'
utils.rmtree(path)
rm_mock.assert_called_once_with(path, ignore_errors=True)
exists_mock.assert_called_once_with(path)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
@mock.patch('fuel_upgrade.utils.shutil.rmtree')
def test_rmtree_no_errors_if_file_does_not_exist(
self, rm_mock, exists_mock):
path = '/some/file/path'
utils.rmtree(path)
self.method_was_not_called(rm_mock)
exists_mock.assert_called_once_with(path)
def test_check_file_is_valid_json(self):
path = '/path/to/file.json'
with mock.patch(
'__builtin__.open',
self.mock_open('{"valid": "json"}')):
self.assertTrue(utils.check_file_is_valid_json(path))
def test_check_file_is_valid_json_returns_false(self):
path = '/path/to/file.json'
with mock.patch(
'__builtin__.open',
self.mock_open('{"invalid: "json"}')):
self.assertFalse(utils.check_file_is_valid_json(path))
def test_check_file_is_valid_json_false_if_problems_with_access(self):
path = '/path/to/file.json'
with mock.patch('__builtin__.open', side_effect=IOError()):
self.assertFalse(utils.check_file_is_valid_json(path))
def test_byte_to_megabyte(self):
self.assertEqual(utils.byte_to_megabyte(0), 0)
self.assertEqual(utils.byte_to_megabyte(1048576), 1)
def test_calculate_free_space(self):
dev_info = mock.Mock()
dev_info.f_bsize = 1048576
dev_info.f_bavail = 2
with mock.patch('fuel_upgrade.utils.os.statvfs',
return_value=dev_info) as st_mock:
self.assertEqual(utils.calculate_free_space('/tmp/dir'), 2)
st_mock.assert_called_once_with('/tmp/dir/')
@mock.patch('fuel_upgrade.utils.os.path.ismount',
side_effect=[False, False, True])
def test_find_mount_point(self, mock_ismount):
path = '/dir1/dir2/dir3/dir4'
self.assertEqual(utils.find_mount_point(path), '/dir1/dir2')
self.called_times(mock_ismount, 3)
@mock.patch('fuel_upgrade.utils.os.path.getsize', return_value=1048576)
@mock.patch('fuel_upgrade.utils.os.walk',
return_value=[('', '', ['file1', 'file2'])])
@mock.patch('fuel_upgrade.utils.os.path.isfile',
return_value=True)
def test_dir_size(self, _, __, ___):
path = '/path/dir'
self.assertEqual(utils.dir_size(path), 2)
@mock.patch('fuel_upgrade.utils.os.path.getsize', return_value=1048576)
@mock.patch('fuel_upgrade.utils.os.path.isfile', return_value=True)
def test_files_size(self, _, __):
path = ['/path/file1', '/path/file2']
self.assertEqual(utils.files_size(path), 2)
def test_compare_version(self):
self.assertEqual(utils.compare_version('0.1', '0.2'), 1)
self.assertEqual(utils.compare_version('0.1', '0.1.5'), 1)
self.assertEqual(utils.compare_version('0.2', '0.1'), -1)
self.assertEqual(utils.compare_version('0.2', '0.2'), 0)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.copy')
def test_copy_if_does_not_exist_file_exists(self, copy_mock, exists_mock):
utils.copy_if_does_not_exist('from', 'to')
exists_mock.assert_called_once_with('to')
self.method_was_not_called(copy_mock)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
@mock.patch('fuel_upgrade.utils.copy')
def test_copy_if_does_not_exist_file_does_not_exist(
self, copy_mock, exists_mock):
utils.copy_if_does_not_exist('from', 'to')
exists_mock.assert_called_once_with('to')
copy_mock.assert_called_once_with('from', 'to')
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
@mock.patch('fuel_upgrade.utils.copy')
def test_copy_if_exists_file_does_not_exist(
self, copy_mock, exists_mock):
utils.copy_if_exists('from', 'to')
exists_mock.assert_called_once_with('from')
self.method_was_not_called(copy_mock)
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.copy')
def test_copy_if_exists_file_exists(
self, copy_mock, exists_mock):
utils.copy_if_exists('from', 'to')
exists_mock.assert_called_once_with('from')
copy_mock.assert_called_once_with('from', 'to')
@mock.patch('fuel_upgrade.utils.os.rename')
def test_rename(self, rename_mock):
utils.rename('source', 'destination')
rename_mock.assert_called_once_with('source', 'destination')
@mock.patch('fuel_upgrade.utils.os.path.lexists', return_value=True)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=False)
@mock.patch('fuel_upgrade.utils.os.remove')
def test_remove_file(self, remove_mock, _, __):
utils.remove('path')
remove_mock.assert_called_once_with('path')
@mock.patch('fuel_upgrade.utils.os.path.lexists', return_value=True)
@mock.patch('fuel_upgrade.utils.os.path.islink', return_value=True)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=True)
@mock.patch('fuel_upgrade.utils.os.remove')
def test_remove_link_to_dir(self, remove_mock, _, __, ___):
utils.remove('path')
remove_mock.assert_called_once_with('path')
@mock.patch('fuel_upgrade.utils.os.path.lexists', return_value=False)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=False)
@mock.patch('fuel_upgrade.utils.os.remove')
def test_remove_file_does_not_exist(self, remove_mock, _, __):
utils.remove('path')
self.method_was_not_called(remove_mock)
@mock.patch('fuel_upgrade.utils.os.path.lexists', return_value=True)
@mock.patch('fuel_upgrade.utils.os.path.isdir', return_value=True)
@mock.patch('fuel_upgrade.utils.shutil.rmtree')
def test_remove_dir(self, remove_mock, _, __):
utils.remove('path')
remove_mock.assert_called_once_with('path', ignore_errors=True)
@mock.patch('fuel_upgrade.utils.yaml')
def test_save_as_yaml(self, yaml_mock):
path = '/tmp/path'
data = {'a': 'b'}
mock_open = self.mock_open('')
with mock.patch('__builtin__.open', mock_open):
utils.save_as_yaml(path, data)
yaml_mock.dump.assert_called_once_with(data, default_flow_style=False)
@mock.patch('fuel_upgrade.utils.yaml')
def test_read_from_yaml(self, yaml_mock):
path = '/tmp/path'
data = yaml.dump({'a': 'b'})
mock_open = self.mock_open(data)
with mock.patch('fuel_upgrade.utils.io.open', mock_open):
utils.read_from_yaml(path)
yaml_mock.load.assert_called_once_with(data)
def test_generate_uuid_string(self):
random_string = utils.generate_uuid_string()
self.assertEqual(len(random_string), 36)
self.assertTrue(isinstance(random_string, str))
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
@mock.patch('fuel_upgrade.utils.file_contains_lines', returns_value=True)
def test_verify_postgres_dump(self, file_contains_mock, exists_mock):
pg_dump_path = '/tmp/some/path'
utils.verify_postgres_dump(pg_dump_path)
patterns = [
'-- PostgreSQL database cluster dump',
'-- PostgreSQL database dump',
'-- PostgreSQL database dump complete',
'-- PostgreSQL database cluster dump complete']
exists_mock.assert_called_once_with(pg_dump_path)
file_contains_mock.assert_called_once_with(pg_dump_path, patterns)
def test_file_extension(self):
cases = [
('', ''),
('asdf', ''),
('asdf.', ''),
('asdf.txt', 'txt'),
('asdf.txt.trtr', 'trtr')]
for case in cases:
self.assertEqual(utils.file_extension(case[0]), case[1])
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=True)
def test_file_exists_returns_true(self, exists_mock):
self.assertTrue(utils.file_exists('path'))
exists_mock.assert_called_once_with('path')
@mock.patch('fuel_upgrade.utils.os.path.exists', return_value=False)
def test_file_exists_returns_false(self, exists_mock):
self.assertFalse(utils.file_exists('path'))
exists_mock.assert_called_once_with('path')
@mock.patch('fuel_upgrade.utils.os.walk')
def test_iterfiles(self, walk):
for _ in utils.iterfiles('path/to/dir'):
pass
walk.assert_called_once_with('path/to/dir', topdown=True)
@mock.patch('fuel_upgrade.utils.os.walk')
def test_iterfiles_filter(self, walk):
expected_files = ['/fake/path/1', '/fake/path/2']
walk.return_value = [('/fake/path', '', '1'), ('/fake/path', '', '2')]
files = list(utils.iterfiles_filter('/fake/path', '*1'))
self.assertEqual(files, expected_files[:1])
def test_render_template_to_file(self):
template_path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '../templates/nailgun.repo'))
with open(template_path, 'r') as f:
template = f.read()
mopen = mock.mock_open(read_data=template)
with mock.patch('__builtin__.open', mopen, create=True):
utils.render_template_to_file('mocked', 'mocked', {
'name': 'mos7.0-updates',
'baseurl': 'http://mirror.fuel-infra.org/mos-repos/centos/'
'mos7.0-centos6-fuel/updates/x86_64/',
'gpgcheck': 0,
'skip_if_unavailable': 1,
})
mopen().write.assert_called_once(textwrap.dedent('''\
[mos7.0-updates]
name=mos7.0-updates
baseurl=http://mirror.fuel-infra.org/mos-repos\
/centos/mos7.0-centos6-fuel/updates/x86_64/
gpgcheck=0
skip_if_unavailable=1
'''))
class TestVersionedFile(BaseTestCase):
def setUp(self):
self.path = '/tmp/path.ext'
self.versioned_file = utils.VersionedFile(self.path)
@mock.patch('fuel_upgrade.utils.glob.glob', return_value=[])
def test_next_file_name_empty_dir(self, _):
self.assertEqual(
self.versioned_file.next_file_name(),
'{0}.1'.format(self.path))
@mock.patch('fuel_upgrade.utils.glob.glob',
return_value=['/tmp/path.ext',
'/tmp/path.ext.10',
'/tmp/path.ext.6'])
def test_next_file_name_with_files(self, _):
self.assertEqual(
self.versioned_file.next_file_name(),
'{0}.11'.format(self.path))
@mock.patch('fuel_upgrade.utils.glob.glob',
return_value=['/tmp/path.ext',
'/tmp/path.ext.10',
'/tmp/path.ext.6'])
def test_sorted_files(self, _):
self.assertEqual(
self.versioned_file.sorted_files(),
['/tmp/path.ext.10', '/tmp/path.ext.6'])
def test_normversion(self):
cases = [
# (input, output)
('6', '6.0.0'),
('6.0', '6.0.0'),
('6.1', '6.1.0'),
('6.1.0', '6.1.0'),
('6.1.1', '6.1.1'),
('6.1.1.1', '6.1.1.1'),
]
for input_, output in cases:
self.assertEqual(utils.normversion(input_), output)
class TestSanitizer(BaseTestCase):
original = {
'admin_password': 'r00tme',
'not_a_pass': 1,
'nested': {
'password_here': 'JuYReDm4',
'nested_again': [
'apassword!',
'jcqOyKEf',
{'login': 'root', 'a_password': '8xMflcaD', 'x': 55.2},
{
'and_again': {
'UPPERCASE_PASSWORD': 'VpE8gqKN',
'password_as_list': ['it', 'will', 'be', 'changed'],
'admin_token': 'Ab8ph9qO'
}
}
]
}
}
expected = {
'admin_password': '******',
'not_a_pass': 1,
'nested': {
'password_here': '******',
'nested_again': [
'apassword!',
'jcqOyKEf',
{'login': 'root', 'a_password': '******', 'x': 55.2},
{
'and_again': {
'UPPERCASE_PASSWORD': 'VpE8gqKN',
'password_as_list': '******',
'admin_token': '******'
}
}
]
}
}
expected_custom_mask = {
'admin_password': 'XXX',
'not_a_pass': 1,
'nested': {
'password_here': 'XXX',
'nested_again': [
'apassword!',
'jcqOyKEf',
{'login': 'root', 'a_password': 'XXX', 'x': 55.2},
{
'and_again': {
'UPPERCASE_PASSWORD': 'VpE8gqKN',
'password_as_list': 'XXX',
'admin_token': 'XXX'
}
}
]
}
}
def test_hide_data(self):
self.assertEqual(
sanitize(self.original, ['password', 'token']),
self.expected
)
def test_original_object_unchanged(self):
copy_conf = deepcopy(self.original)
sanitize(self.original, ['password', 'token'])
self.assertEqual(self.original, copy_conf)
def test_custom_mask(self):
self.assertEqual(
sanitize(self.original, ['password', 'token'], mask='XXX'),
self.expected_custom_mask
)
@mock.patch('fuel_upgrade.utils.time.sleep')
class TestHttpRetry(BaseTestCase):
def _get_http_error(self, error_code):
response = mock.Mock(status_code=error_code)
return requests.HTTPError(response=response)
def test_do_not_retry_on_not_interesting_errors(self, msleep):
method = mock.Mock(
side_effect=self._get_http_error(404),
__name__='fn')
wrapped_method = http_retry(status_codes=[500])(method)
self.assertRaises(requests.HTTPError, wrapped_method)
self.called_once(method)
self.method_was_not_called(msleep)
def test_do_retry_on_interesting_errors(self, msleep):
method = mock.Mock(
side_effect=self._get_http_error(500),
__name__='fn')
wrapped_method = http_retry(status_codes=[500], attempts=13)(method)
self.assertRaises(requests.HTTPError, wrapped_method)
self.called_times(method, 13)
self.called_times(msleep, 12)
def test_do_sleep_on_attempts(self, msleep):
method = mock.Mock(
side_effect=self._get_http_error(500),
__name__='fn')
wrapped_method = http_retry(
status_codes=[500], attempts=2, interval=42)(method)
self.assertRaises(requests.HTTPError, wrapped_method)
msleep.assert_called_once_with(42)
def test_decorated_method_use_arguments(self, _):
method = mock.Mock(__name__='fn')
wrapped_method = http_retry(status_codes=[500])(method)
wrapped_method(42, 'test')
method.assert_called_once_with(42, 'test')
def test_stop_retrying_if_success(self, msleep):
method = mock.Mock(
side_effect=[self._get_http_error(500), 'return value'],
__name__='fn')
wrapped_method = http_retry(status_codes=[500], attempts=13)(method)
result = wrapped_method()
self.assertEqual(result, 'return value')
self.called_times(method, 2)
self.called_once(msleep)
class TestGetNonUnique(BaseTestCase):
def test_get_duplicates(self):
self.assertItemsEqual([2, 3], utils.get_non_unique([2, 2, 2, 3, 3, 1]))
def test_empty_if_no_duplicates(self):
self.assertEqual([], list(utils.get_non_unique(six.moves.range(3))))
def test_empty_if_empty_input(self):
self.assertEqual([], list(utils.get_non_unique([])))
|
{
"content_hash": "f9004fcf4185aa41a263ba52149915b4",
"timestamp": "",
"source": "github",
"line_count": 772,
"max_line_length": 79,
"avg_line_length": 36.32772020725388,
"alnum_prop": 0.5798538063825994,
"repo_name": "SmartInfrastructures/fuel-web-dev",
"id": "7f73163d508ad131baf71e7867cb0c0513763111",
"size": "28680",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fuel_upgrade_system/fuel_upgrade/fuel_upgrade/tests/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "91131"
},
{
"name": "HTML",
"bytes": "7949"
},
{
"name": "JavaScript",
"bytes": "945307"
},
{
"name": "Mako",
"bytes": "1943"
},
{
"name": "Python",
"bytes": "3961568"
},
{
"name": "Ruby",
"bytes": "14701"
},
{
"name": "Shell",
"bytes": "24392"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
setup(
name = "python-django-horizon-sina",
version = "2013.1",
description = ("A sina auth plugin for django-horizon."),
maintainer = "Yingjun Li",
maintainer_email = 'liyingjun1988@gmail.com',
license = "Apache 2.0",
keywords = "sina django",
url = "http://packages.python.org/an_example_pypi_project",
packages = ['horizon.sina',
'horizon.tencent',
'horizon.common'],
long_description = ("A sina auth plugin for django-horizon."),
)
|
{
"content_hash": "7b831f49bb8c04f1ba08a81402998fd2",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 32.23529411764706,
"alnum_prop": 0.6259124087591241,
"repo_name": "foolself/python-django-horizon-sina",
"id": "76ffd45cf47bcacc4395ed78fb30e3a5efa33ae6",
"size": "548",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "2215"
},
{
"name": "Python",
"bytes": "25602"
}
],
"symlink_target": ""
}
|
import argparse
import math
import re
class Experiment(object):
def __init__(self, commit):
self.commit = commit
self.outputs = []
class Results(dict):
def __init__(self, *args, **kwargs):
super(Results, self).__init__(*args, **kwargs)
class LogReader(object):
def __init__(self, fname):
with open(fname, 'r') as f:
self.lines = f.read().split('\n')
self.current_line = 0
def wait_for_line(self, pattern):
p = re.compile(pattern)
n = len(self.lines)
i = self.current_line
while i < n and not p.match(self.lines[i].strip()):
i = i + 1
return i
def parse_trial(self, max_i):
output = Results()
n = len(self.lines)
j = self.wait_for_line('(NO_MADVISE)|(WITH_MADVISE)')
if j >= max_i:
return None
else:
self.current_line = j
if self.lines[self.current_line].strip() == 'WITH_MADVISE':
output.with_madvise = True
else:
output.with_madvise = False
self.current_line = self.wait_for_line('TRIAL \d+')
if self.current_line >= n: return None
self.current_line = self.wait_for_line('==========BEFORE VM STAT==========')
if self.current_line >= n: return None
j = self.wait_for_line('==========LLAMA OUTPUT==========')
if j >= n: return None
for line in self.lines[self.current_line+1: j]:
parts = line.split(':')
if len(parts) == 2:
output['before ' + parts[0].strip()] = parts[1].strip()
self.current_line = j
j = self.wait_for_line('==========AFTER VM STAT==========')
if j >= n: return None
for line in self.lines[self.current_line+1: j]:
parts = line.split(':')
if len(parts) == 2:
output[parts[0].strip()] = parts[1].strip()
self.current_line = j
j = self.wait_for_line('==========END LLAMA OUTPUT==========')
if j >= n: return None
for line in self.lines[self.current_line+1: j]:
parts = line.split(':')
if len(parts) == 2:
output['after ' + parts[0].strip()] = parts[1].strip()
self.current_line = j+1
return output
def parse(self):
experiments = []
n = len(self.lines)
while self.current_line < n:
self.current_line = self.wait_for_line('==========START EXPERIMENT==========')
if self.current_line >= n:
break
a = Experiment(self.lines[self.current_line+1])
self.current_line += 2
next_expt = self.wait_for_line('==========START EXPERIMENT==========')
while True:
if self.current_line >= next_expt:
break
output = self.parse_trial(next_expt)
if output:
a.outputs.append(output)
else:
break
experiments.append(a)
return experiments
def floatify(s):
return float(s.split()[0])
def mean(ls):
if ls:
return sum(ls)/len(ls)
return 0
def stddev(ls):
if ls:
return math.sqrt(mean([l**2 for l in ls])-mean(ls)**2)
return 0
def main():
parser = argparse.ArgumentParser(description='Analyze LLAMA benchmark log files')
parser.add_argument('logfile', type=str,
help='Log file to analyze')
parser.add_argument('-o', '--output', type=str,
default='output.csv',
help='Output csv file')
args = parser.parse_args()
log_reader = LogReader(args.logfile)
experiments = log_reader.parse()
with open(args.output, 'w') as output:
output.write('Time (no madvise; s),Time (madvise; s)\n')
for e in experiments:
nm_time = [floatify(k['Time']) for k in e.outputs
if not k.with_madvise]
wm_time = [floatify(k['Time']) for k in e.outputs
if k.with_madvise]
output.write('%s,%s\n' % (
mean(nm_time),
mean(wm_time),
))
if __name__=='__main__':
main()
|
{
"content_hash": "a297a70cdf6b3b686c8fea464b4ca388",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 90,
"avg_line_length": 29.916083916083917,
"alnum_prop": 0.5002337540906966,
"repo_name": "fding/llama",
"id": "371668e3e3e742f105b31c8b836aeb0d0a9c84e6",
"size": "4278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analyze_sequential.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "3462"
},
{
"name": "C++",
"bytes": "858194"
},
{
"name": "Makefile",
"bytes": "9788"
},
{
"name": "Python",
"bytes": "44646"
},
{
"name": "Shell",
"bytes": "22826"
}
],
"symlink_target": ""
}
|
"""The version component."""
|
{
"content_hash": "b2c2910e0d2dfbb6537861c259283662",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 28,
"avg_line_length": 29,
"alnum_prop": 0.6551724137931034,
"repo_name": "molobrakos/home-assistant",
"id": "eb257007f7cc20cc7d0525b563554c46341ae379",
"size": "29",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "homeassistant/components/version/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "407"
},
{
"name": "Python",
"bytes": "15057917"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
}
|
"""Options for BigMLer source processing
"""
def get_source_options(defaults=None):
"""source-related options
"""
if defaults is None:
defaults = {}
options = {
# Path to the training set.
'--train': {
"action": 'store',
"dest": 'training_set',
'nargs': '?',
"default": defaults.get('train', None),
"help": "Training set path."},
# If a BigML source is provided, the script won't create a new one
'--source': {
"action": 'store',
"dest": 'source',
"default": defaults.get('source', None),
"help": "BigML source Id."},
# If a BigML json file containing a source structure is provided,
# the script will use it.
'--source-file': {
'action': 'store',
'dest': 'source_file',
'default': defaults.get('source_file', None),
'help': "BigML source JSON structure file."},
# The path to a file containing names if you want to alter BigML's
# default field names or the ones provided by the train file header.
# Kept for backwards compatibility
'--field-names': {
'action': 'store',
'dest': 'field_attributes',
'default': defaults.get('field_names', None),
'help': ("Path to a csv file describing field names. One"
" definition per line (e.g., 0,'Last Name').")},
# The path to a file containing attributes if you want to alter BigML's
# default field attributes or the ones provided by the train file
# header.
'--field-attributes': {
'action': 'store',
'dest': 'field_attributes',
'default': defaults.get('field_attributes', None),
'help': ("Path to a csv file describing field attributes."
" One definition per line"
" (e.g., 0,'Last Name').")},
# The path to a file containing types if you want to alter BigML's
# type auto-detect.
'--types': {
'action': 'store',
'dest': 'types',
'default': defaults.get('types', None),
'help': ("Path to a file describing field types. One"
" definition per line (e.g., 0, 'numeric').")},
# Set when the training set file doesn't include a header on the first
# line.
'--no-train-header': {
'action': 'store_false',
'dest': 'train_header',
'default': defaults.get('train_header', True),
'help': "The train set file hasn't a header."},
# Set when the training set file does include a header on the first
# line. (opposed to --no-train-header)
'--train-header': {
'action': 'store_true',
'dest': 'train_header',
'default': defaults.get('train_header', True),
'help': "The train set file has a header."},
# Shows progress information when uploading a file.
'--progress-bar': {
'action': 'store_true',
'dest': 'progress_bar',
'default': defaults.get('progress_bar', False),
'help': "Show progress details when creating a source."},
# Locale settings.
'--locale': {
'action': 'store',
'dest': 'user_locale',
'default': defaults.get('locale', None),
'help': "Chosen locale code string."},
# The path to a file containing source attributes.
'--source-attributes': {
'action': 'store',
'dest': 'source_attributes',
'default': defaults.get('source_attributes', None),
'help': ("Path to a json file describing source"
" attributes.")},
# Hides progress information when uploading a file. (opposed to
# --progress-bar)
'--no-progress-bar': {
'action': 'store_false',
'dest': 'progress_bar',
'default': defaults.get('progress_bar', False),
'help': "Show progress details when creating a source."},
# Training set field separator. Defaults to the locale csv
# separator.
'--training-separator': {
'action': 'store',
'dest': 'training_separator',
'default': defaults.get('training_separator', None),
'help': ("Training set field separator.")},
# Name of the project to be created and/or used in resource creation
'--project': {
'action': 'store',
'dest': 'project',
'default': defaults.get('project', None),
'help': ("Name of the project to be created and/or used in"
"resource creation.")},
# Id of the project to be used in source creation
'--project-id': {
'action': 'store',
'dest': 'project_id',
'default': defaults.get('project_id', None),
'help': ("Id of the project to be used in"
"source creation.")}}
return options
|
{
"content_hash": "11bbaff1d5b12477d2542e9c56f139f2",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 79,
"avg_line_length": 38.67910447761194,
"alnum_prop": 0.5199691298475786,
"repo_name": "brokendata/bigmler",
"id": "ed9a38013d4e8d5fd87bb0236f0c58d0089113e2",
"size": "5785",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bigmler/options/source.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "25598"
},
{
"name": "Python",
"bytes": "965199"
}
],
"symlink_target": ""
}
|
import hashlib
import logging
import time
import json
import requests
logger = logging.getLogger(__name__)
class QobuzAPI(object):
APP_ID = '214748364'
APP_SECRET = '6fdcbccb7a073f35fbd16a193cdef6c4'
FLAC_FORMAT_ID = 6
def __init__(self, username, password, load_state, save_state):
"""
Creates a Qobuz API
username -- Qobuz username
password -- Qobuz password
load_state -- a function that returns the current state as a string or None if no state
save_state -- a function that accepts a string argument and persists it for load_state
"""
state = load_state()
self.login_data = None
if state is None:
logger.debug('There is no state, logging in.')
self.login_data = self.login(username, password)
save_state(json.dumps(self.login_data))
else:
logger.debug('Found state, looading.')
self.login_data = json.loads(state)
self.call('status/test')
def login(self, username, password):
password_hash = hashlib.md5(password).hexdigest()
return self.call('user/login', username=username, password=password_hash)
def get_request(self, endpoint, **kwargs):
headers = {
'X-App-Id': self.APP_ID,
}
if self.login_data:
headers['X-User-Auth-Token'] = self.login_data['user_auth_token']
url = 'http://www.qobuz.com/api.json/0.2/{0}'.format(endpoint)
return requests.get(url, params=kwargs, headers=headers)
def get_file_url(self, track_id, format_id=FLAC_FORMAT_ID):
track_id = str(track_id)
format_id = str(format_id)
request_ts = int(time.time())
sig = 'trackgetFileUrl{0}{1}{2}{3}'.format(
'format_id' + format_id,
'track_id' + track_id,
request_ts,
self.APP_SECRET,
)
sig_md5 = hashlib.md5(sig).hexdigest()
return self.call('track/getFileUrl', track_id=track_id, format_id=format_id,
request_ts=request_ts, request_sig=sig_md5)
def call(self, endpoint, **kwargs):
logger.debug('Calling %s params=%s', endpoint, kwargs)
response = self.get_request(endpoint, **kwargs)
response.raise_for_status()
return response.json()
|
{
"content_hash": "df3947e4ec8c0272d8a58ba085a43830",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 95,
"avg_line_length": 35.02985074626866,
"alnum_prop": 0.6003408606731998,
"repo_name": "MADindustries/WhatManager2",
"id": "2b9eea4bf31fbceb9dd74a474e82ca0460649cf5",
"size": "2347",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "qiller/qobuz_api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "202636"
},
{
"name": "HTML",
"bytes": "139705"
},
{
"name": "JavaScript",
"bytes": "632927"
},
{
"name": "Python",
"bytes": "508225"
},
{
"name": "Shell",
"bytes": "2294"
}
],
"symlink_target": ""
}
|
"""
Takes as input a 2-column (x,y) CSV file and outputs
a single 2-column (x+y,x*y) output CSV file.
"""
from argparse import FileType, ArgumentParser
import csv
import os
# In order to work with kive, scripts that have a inputs
# and b outputs must have a+b command line arguments, the first a
# arguments specifying paths of input files, the subsequent b
# arguments specifying the paths where outputs are written.
# ArgumentParser facilitates parsing inputs from sys.argv, and
# generates help messages based on the expected input specification
parser = ArgumentParser(
description="Takes CSV with (x,y), outputs CSV with (x+y),(x*y)")
parser.add_argument("input_csv",
type=FileType('rU'),
help="CSV containing (x,y) pairs")
parser.add_argument("output_csv",
type=FileType('wb'),
help="CSV containing (x+y,xy) pairs")
args = parser.parse_args()
reader = csv.DictReader(args.input_csv)
writer = csv.DictWriter(args.output_csv,
['sum', 'product'],
lineterminator=os.linesep)
writer.writeheader()
for row in reader:
x = int(row['x'])
y = int(row['y'])
writer.writerow(dict(sum=x+y, product=x*y))
|
{
"content_hash": "e644faccd2f44dfae61a0fd9bf176134",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 69,
"avg_line_length": 33.7027027027027,
"alnum_prop": 0.652766639935846,
"repo_name": "cfe-lab/Kive",
"id": "24f187c2363ab2947fb9df73991a737e4796cce9",
"size": "1271",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "samplecode/script_1_sum_and_products.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "26511"
},
{
"name": "HTML",
"bytes": "81400"
},
{
"name": "JavaScript",
"bytes": "121951"
},
{
"name": "Jinja",
"bytes": "15965"
},
{
"name": "Makefile",
"bytes": "1957"
},
{
"name": "Python",
"bytes": "1453355"
},
{
"name": "Sass",
"bytes": "15929"
},
{
"name": "Shell",
"bytes": "37562"
},
{
"name": "Singularity",
"bytes": "2941"
},
{
"name": "TypeScript",
"bytes": "356365"
}
],
"symlink_target": ""
}
|
"""
This platform provides sensors for OpenUV data.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.openuv/
"""
import logging
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.components.openuv import (
DATA_OPENUV_CLIENT, DATA_UV, DOMAIN, SENSORS, TOPIC_UPDATE,
TYPE_CURRENT_OZONE_LEVEL, TYPE_CURRENT_UV_INDEX, TYPE_CURRENT_UV_LEVEL,
TYPE_MAX_UV_INDEX, TYPE_SAFE_EXPOSURE_TIME_1, TYPE_SAFE_EXPOSURE_TIME_2,
TYPE_SAFE_EXPOSURE_TIME_3, TYPE_SAFE_EXPOSURE_TIME_4,
TYPE_SAFE_EXPOSURE_TIME_5, TYPE_SAFE_EXPOSURE_TIME_6, OpenUvEntity)
from homeassistant.util.dt import as_local, parse_datetime
DEPENDENCIES = ['openuv']
_LOGGER = logging.getLogger(__name__)
ATTR_MAX_UV_TIME = 'time'
EXPOSURE_TYPE_MAP = {
TYPE_SAFE_EXPOSURE_TIME_1: 'st1',
TYPE_SAFE_EXPOSURE_TIME_2: 'st2',
TYPE_SAFE_EXPOSURE_TIME_3: 'st3',
TYPE_SAFE_EXPOSURE_TIME_4: 'st4',
TYPE_SAFE_EXPOSURE_TIME_5: 'st5',
TYPE_SAFE_EXPOSURE_TIME_6: 'st6'
}
UV_LEVEL_EXTREME = "Extreme"
UV_LEVEL_VHIGH = "Very High"
UV_LEVEL_HIGH = "High"
UV_LEVEL_MODERATE = "Moderate"
UV_LEVEL_LOW = "Low"
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up an OpenUV sensor based on existing config."""
pass
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up a Nest sensor based on a config entry."""
openuv = hass.data[DOMAIN][DATA_OPENUV_CLIENT][entry.entry_id]
sensors = []
for sensor_type in openuv.sensor_conditions:
name, icon, unit = SENSORS[sensor_type]
sensors.append(
OpenUvSensor(
openuv, sensor_type, name, icon, unit, entry.entry_id))
async_add_entities(sensors, True)
class OpenUvSensor(OpenUvEntity):
"""Define a binary sensor for OpenUV."""
def __init__(self, openuv, sensor_type, name, icon, unit, entry_id):
"""Initialize the sensor."""
super().__init__(openuv)
self._async_unsub_dispatcher_connect = None
self._entry_id = entry_id
self._icon = icon
self._latitude = openuv.client.latitude
self._longitude = openuv.client.longitude
self._name = name
self._sensor_type = sensor_type
self._state = None
self._unit = unit
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
def should_poll(self):
"""Disable polling."""
return False
@property
def state(self):
"""Return the status of the sensor."""
return self._state
@property
def unique_id(self) -> str:
"""Return a unique, HASS-friendly identifier for this entity."""
return '{0}_{1}_{2}'.format(
self._latitude, self._longitude, self._sensor_type)
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit
async def async_added_to_hass(self):
"""Register callbacks."""
@callback
def update():
"""Update the state."""
self.async_schedule_update_ha_state(True)
self._async_unsub_dispatcher_connect = async_dispatcher_connect(
self.hass, TOPIC_UPDATE, update)
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listener when removed."""
if self._async_unsub_dispatcher_connect:
self._async_unsub_dispatcher_connect()
async def async_update(self):
"""Update the state."""
data = self.openuv.data[DATA_UV]['result']
if self._sensor_type == TYPE_CURRENT_OZONE_LEVEL:
self._state = data['ozone']
elif self._sensor_type == TYPE_CURRENT_UV_INDEX:
self._state = data['uv']
elif self._sensor_type == TYPE_CURRENT_UV_LEVEL:
if data['uv'] >= 11:
self._state = UV_LEVEL_EXTREME
elif data['uv'] >= 8:
self._state = UV_LEVEL_VHIGH
elif data['uv'] >= 6:
self._state = UV_LEVEL_HIGH
elif data['uv'] >= 3:
self._state = UV_LEVEL_MODERATE
else:
self._state = UV_LEVEL_LOW
elif self._sensor_type == TYPE_MAX_UV_INDEX:
self._state = data['uv_max']
self._attrs.update({
ATTR_MAX_UV_TIME: as_local(parse_datetime(data['uv_max_time']))
})
elif self._sensor_type in (TYPE_SAFE_EXPOSURE_TIME_1,
TYPE_SAFE_EXPOSURE_TIME_2,
TYPE_SAFE_EXPOSURE_TIME_3,
TYPE_SAFE_EXPOSURE_TIME_4,
TYPE_SAFE_EXPOSURE_TIME_5,
TYPE_SAFE_EXPOSURE_TIME_6):
self._state = data['safe_exposure_time'][EXPOSURE_TYPE_MAP[
self._sensor_type]]
|
{
"content_hash": "40998bc785217ef6c689766551434a60",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 79,
"avg_line_length": 34.182432432432435,
"alnum_prop": 0.5961652500494169,
"repo_name": "tinloaf/home-assistant",
"id": "63527db42a6b8ea2a0da7357b4366691c6e31639",
"size": "5059",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/openuv.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1099"
},
{
"name": "Python",
"bytes": "13135313"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17137"
}
],
"symlink_target": ""
}
|
__author__ = 'Deathnerd'
|
{
"content_hash": "c7948110b984f6a8e557aae47136522f",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 24,
"avg_line_length": 25,
"alnum_prop": 0.6,
"repo_name": "Deathnerd/RPG",
"id": "cdea0ffce8188ac99bab38011f54b0afb3bc99f7",
"size": "25",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "rest_rpg/admin/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "203"
},
{
"name": "Python",
"bytes": "4537"
}
],
"symlink_target": ""
}
|
import copy
import getopt
import string
import sys
import mpfit
import Numeric
from ppgplot import *
import BonnLogger
def phot_funct_2(p, fjac=None, y=None, err=None):
model = p[0]
status = 0
return([status, (model-y)/err])
def phot_funct_1(p, fjac=None, color=None, y=None, err=None):
model = p[0] + p[1]*color
status = 0
return([status, (model-y)/err])
def phot_funct_0(p, fjac=None, airmass=None, color=None, y=None, err=None):
model = p[0] + p[1]*airmass + p[2]*color
status = 0
return([status, (model-y)/err])
def readInput(file):
f = open(file, "r")
instMagList = []
stdMagList = []
magErrList = []
colList = []
airmassList = []
for line in f.readlines():
instMag, stdMag, col, airmass, instMagErr, stdMagErr = string.split(line)
magErr = (float(instMagErr)**2. + float(stdMagErr)**2.)**0.5
magErrList.append(magErr)
instMagList.append(float(instMag))
stdMagList.append(float(stdMag))
colList.append(float(col))
airmassList.append(float(airmass))
f.close()
instMag = Numeric.array(instMagList)
stdMag = Numeric.array(stdMagList)
data = stdMag - instMag
airmass = Numeric.array(airmassList)
color = Numeric.array(colList)
magErr = Numeric.array(magErrList)
return data, airmass, color, magErr
def photCalib(data_save, airmass_save, color_save, err_save, p, sigmareject, maxSigIter=50):
save_len = len(data_save)
parinfos = [[{"value": p[0], "fixed": 0},{"value": p[1], "fixed": 0, "limited": [0,1], "limits": [-99, 0]},{"value": p[2], "fixed": 0}],[{"value": p[0], "fixed": 0},{"value": p[1], "fixed": 0}],[{"value": p[0], "fixed": 0}]]
phot_functs = [phot_funct_0, phot_funct_1, phot_funct_2]
solutions = []
for fit_type in [0,1,2]:
airmass = copy.copy(airmass_save)
color = copy.copy(color_save)
data_tmp = copy.copy(data_save)
err = copy.copy(err_save)
#first apply coefficients we are holding fixed
data = copy.copy(data_tmp)
if fit_type == 1:
for i in range(len(data_tmp)):
data[i] = data_tmp[i] - p[1]*airmass[i]
if fit_type == 2:
for i in range(len(data_tmp)):
data[i] = data_tmp[i] - p[1]*airmass[i] - p[2]*color[i]
print data_tmp[0], data[0]
data_rec = copy.copy(data)
parinfo = parinfos[fit_type]
#for j in range(len(parinfo)):
#if j in fixedList:
# print "Element", j, "is fixed at", p[j]
# parinfo[j]["fixed"] = 1
#else:
# parinfo[j]["fixed"] = 0
for i in range(maxSigIter):
old_len = len(data)
fas = [{"airmass": airmass,"color": color, "y": data, "err": err},{"color": color,"y": data, "err": err}, {"y": data, "err": err}]
fa = fas[fit_type]
phot_funct = phot_functs[fit_type]
m = mpfit.mpfit(phot_funct, functkw=fa,
parinfo=parinfo,
maxiter=1000, quiet=1)
print m.covar, m.params, m.perror
if (m.status <= 0):
print 'error message = ', m.errmsg
condition = Numeric.zeros(len(data))
break
#airmass = copy.copy(airmass_save)
#color = copy.copy(color_save)
#data = copy.copy(data_save)
#err = copy.copy(err_save)
# Compute a 3 sigma rejection criterion
#condition = preFilter(m.params, data_save, data,
# airmass_save, airmass,
# color_save, color)
params = [0,0,0]
perror = [0,0,0]
print m.params,m.perror, m.covar
if fit_type == 0:
params = copy.copy(m.params)
perror = copy.copy(m.perror)
if fit_type == 1:
params[0] = m.params[0]
params[2] = m.params[1]
params[1] = p[1]
perror[0] = m.perror[0]
perror[2] = m.perror[1]
if fit_type == 2:
params[0] = m.params[0]
params[1] = p[1]
params[2] = p[2]
perror[0] = m.perror[0]
# Compute a 3 sigma rejection criterion
print params, data_rec[0], data[0]
condition, redchisq = SigmaCond(params, data_save, data,
airmass_save, airmass,
color_save, color, err_save, err, sigmareject)
print redchisq
# Keep everything (from the full data set!) that is within
# the 3 sigma criterion
#data_sig = Numeric.compress(condition, data_save)
data = Numeric.compress(condition, data_rec)
airmass = Numeric.compress(condition, airmass_save)
color = Numeric.compress(condition, color_save)
err = Numeric.compress(condition, err_save)
new_len = len(data)
if float(new_len)/float(save_len) < 0.5:
print "Rejected more than 50% of all measurements."
print "Aborting this fit."
break
# No change
if new_len == old_len:
print "Converged! (%d iterations)" % (i+1, )
print "Kept %d/%d stars." % (new_len, save_len)
break
print params, perror, condition
meanerr = Numeric.sum(err_save)/len(err_save)
solutions.append([params, perror, redchisq, meanerr, condition])
return solutions
def SigmaCond(p, data_save, data, airmass_save, airmass, color_save, color, err_save, err, sigmareject):
if len(data_save) > 1:
#airmass = airmass[int(0.1*len(airmass)):int(0.9*len(airmass))]
#color = color[int(0.1*len(color)):int(0.9*len(color))]
#data = data[int(0.1*len(data)):int(0.9*len(data))]
mo = p[0] + p[1]*airmass + p[2]*color
mo_save = p[0] + p[1]*airmass_save + p[2]*color_save
print len(data), len(mo), len(err)
reddm = (data-mo)/err
redchisq = Numeric.sqrt(Numeric.sum(Numeric.power(reddm, 2)) / (len(reddm) - 1))
dm = data-mo
dm_save = data_save - mo_save
mean = Numeric.sum(dm)/len(dm)
sigma = Numeric.sqrt(Numeric.sum(Numeric.power(mean-dm, 2)) / (len(dm) - 1))
condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * sigma)
#condition = Numeric.less(Numeric.fabs(dm_save), float(sigmareject) * err_save)
else:
condition = Numeric.zeros(len(data_save))
return condition, redchisq
def makePlots(data, airmass, color, outfile, solutions, label):
file = outfile+".ps"
pgbeg(file+"/cps", 2, 3)
pgiden()
for i in range(3):
result = solutions[i]
# Airmass plot
pgpanl(1, i+1)
airMin = 1
airMax = Numeric.sort(airmass)[-1]*1.1
print result
dataAirMax = result[0][0]+result[0][1]+1
dataAirMin = result[0][0]+result[0][1]-1
dataColMax = result[0][0]+1
dataColMin = result[0][0]-1
colMinVal = Numeric.sort(color)[0]
if colMinVal < 0:
colMin = colMinVal*1.1
else:
colMin = colMinVal*0.95
colMax = Numeric.sort(color)[-1]*1.1
if result[0] and result[1]:
eqStr = "%d parameter fit: Mag-Mag(Inst) = %.2f\\(2233)%.2f + (%.2f\\(2233)%.2f) airmass + "\
"(%.2f\\(2233)%.2f) color" % \
(3-i, result[0][0], result[1][0], result[0][1], result[1][1], result[0][2], result[1][2])
else:
eqStr = "%d parameter fit not possible" % (3-i, )
fixenv([1, airMax] ,
[dataAirMin, dataAirMax],
eqStr, label=["Airmass", "Mag - Mag(Inst)"])
condition = result[4]
goodAirmass = Numeric.compress(condition, airmass)
goodData = Numeric.compress(condition, data)
goodColor = Numeric.compress(condition, color)
badAirmass = Numeric.compress(Numeric.logical_not(condition), airmass)
badData = Numeric.compress(Numeric.logical_not(condition), data)
badColor = Numeric.compress(Numeric.logical_not(condition), color)
if len(goodData):
pgsci(3)
# Rescale to zero color and filter for data within
# our plotting range
plotData = goodData-result[0][2]*goodColor
plotCond1 = Numeric.less(plotData, dataAirMax)
plotCond2 = Numeric.greater(plotData, dataAirMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotAirmass = Numeric.compress(plotCond, goodAirmass)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotAirmass, plotData, 5)
print type(plotAirmass), type(plotData)
if len(badData):
pgsci(2)
plotData = badData-result[0][2]*badColor
plotCond1 = Numeric.less(plotData, dataAirMax)
plotCond2 = Numeric.greater(plotData, dataAirMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotAirmass = Numeric.compress(plotCond, badAirmass)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotAirmass, plotData, 5)
pgsci(1)
a = Numeric.arange(1, airMax, 0.01)
m = result[0][0] + result[0][1] * a
pgline(a, m)
# Color Plot
pgpanl(2, i+1)
fixenv([colMin, colMax] ,
[dataColMin, dataColMax],
eqStr, label=[label, "Mag - Mag(Inst)"])
if len(goodData):
pgsci(3)
# Rescale to zero airmass and filter for data within
# our plotting range
plotData = goodData-result[0][1]*goodAirmass
plotCond1 = Numeric.less(plotData, dataColMax)
plotCond2 = Numeric.greater(plotData, dataColMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotColor = Numeric.compress(plotCond, goodColor)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotColor, plotData, 5)
if len(badData):
pgsci(2)
plotData = badData-result[0][1]*badAirmass
plotCond1 = Numeric.less(plotData, dataColMax)
plotCond2 = Numeric.greater(plotData, dataColMin)
plotCond = Numeric.logical_and(plotCond1, plotCond2)
plotColor = Numeric.compress(plotCond, badColor)
plotData = Numeric.compress(plotCond, plotData)
pgpt(plotColor, plotData, 5)
pgsci(1)
a = Numeric.arange(colMin, colMax, 0.01)
m = result[0][0] + result[0][2] * a
pgline(a, m)
return
def fixenv (xrange=[0,1], yrange=[0,1], fname="none", ci = 1, label=["x", "y"]):
# set axis ranges.
pgswin(xrange[0], xrange[1], yrange[0], yrange[1])
pgsci(ci) # set color index.
pgbox() # draw axes.
pgsci(1) # back to color index 1 (white)
pglab(label[0], label[1], fname) # label the plot
return
def saveResults(file, solutions, step, sigmareject, cluster, colorused):
f = open(file+".asc", "w")
which_solution = 0
import MySQLdb, sys, os, re
db2 = MySQLdb.connect(db='subaru', user='weaklensing', passwd='darkmatter', host='ki-rh8')
c = db2.cursor()
#c.execute("DROP TABLE IF EXISTS photometry_db")
for result in solutions:
which_solution += 1
if Numeric.sometrue(result[2]):
import os , time
user_name = os.environ['USER']
bonn_target = os.environ['BONN_TARGET']
bonn_filter = os.environ['BONN_FILTER']
time_now = time.asctime()
user = user_name #+ str(time.time())
standardstartype = os.environ['STANDARDSTARTYPE']
floatvars = {'ZP':result[0][0],'AIRMASS':result[0][1],'COLOR':result[0][2],'ZPERR':result[1][0],'AIRMASSERR':result[1][1],'COLORERR':result[1][2],'REDCHISQ':result[2],'MEANERR':result[3]}
stringvars = {'USER':user_name,'BONN_TARGET':bonn_target,'BONN_FILTER':bonn_filter,'TIME':time_now,'CHOICE':'', 'NUMBERVARS':4-which_solution,'STANDARDSTARTYPE':standardstartype,'USER': user, 'step': step, 'sigmareject':sigmareject, 'cluster':cluster,'colorused':colorused}
# make database if it doesn't exist
make_db = reduce(lambda x,y: x + ',' + y,[x + ' float(30)' for x in floatvars.keys()])
make_db += ',' + reduce(lambda x,y: x + ',' + y,[x + ' varchar(80)' for x in stringvars.keys()])
command = "CREATE TABLE IF NOT EXISTS photometry_db ( id MEDIUMINT NOT NULL AUTO_INCREMENT, PRIMARY KEY (id), " + make_db + ")"
print command
c.execute(command)
# insert new observation
names = reduce(lambda x,y: x + ',' + y, [x for x in floatvars.keys()])
values = reduce(lambda x,y: str(x) + ',' + str(y), [floatvars[x] for x in floatvars.keys()])
names += ',' + reduce(lambda x,y: x + ',' + y, [x for x in stringvars.keys()])
values += ',' + reduce(lambda x,y: x + ',' + y, ["'" + str(stringvars[x]) + "'" for x in stringvars.keys()])
command = "INSERT INTO photometry_db (" + names + ") VALUES (" + values + ")"
print command
c.execute(command)
f.write("%s %s %s\n" % (result[0][0], result[0][1], result[0][2]))
f.write("%s %s %s\n" % (result[1][0], result[1][1], result[1][2]))
f.write("%s#ReducedChiSq\n" % (result[2]))
f.write("%s#MeanError\n" % (result[3]))
f.write("%s\n" % (id))
else:
f.write("-1 -1 -1\n")
f.write("-1 -1 -1\n")
f.write("-1#ReducedChiSq\n")
f.write("-1#MeanError\n")
f.write("%s\n" % (id))
f.close
return id
def usage():
print "Usage:"
print "photo_abs.py -i input -f filter -n GABODSID - e ext. coeff. -c color coeff. -o output -l label"
print
print " -i, --input=STRING Input file, must have 4 columns: Instrumental Mag, Standard Mag, Color, Airmass"
print " -o, --output=STRING Output file basename"
print " -n, --night=INT GABODSID, unique numerical night identifier"
print " -e, --extinction=FLOAT Default value of extinction coefficient for one/two parameter fit"
print " -c, --color=FLOAT Default value of color term for one parameter fit"
print " -l, --label=STRING Label for color axis (e.g. B-V)"
print
print "Author:"
print " Joerg Dietrich <dietrich@astro.uni-bonn.de>"
print
return
if __name__ == "__main__":
__bonn_logger_id__ = BonnLogger.addCommand('maskBadOverscans.py',
sys.argv[1:])
try:
opts, args = getopt.getopt(sys.argv[1:],
"i:n:o:e:c:l:s:",
["input=", "night=", "extinction=",
"color=", "output=", "label=","sigmareject=","step=","cluster=","colorused="])
except getopt.GetoptError:
usage()
BonnLogger.updateStatus(__bonn_logger_id__, 1)
sys.exit(2)
print sys.argv[1:]
infile = night = extcoeff = colcoeff = outfile = label = sigmareject = step = cluster = colorused = None
for o, a in opts:
if o in ("-i", "--input"):
infile = a
elif o in ("-o", "--output"):
outfile = a
elif o in ("-n", "--night"):
night = int(a)
elif o in ("-e", "--extinction"):
extcoeff = float(a)
elif o in ("-c", "--color"):
colcoeff = float(a)
elif o in ("-l", "--label"):
label = a
elif o in ("-s", "--sigmareject"):
sigmareject = float(a)
elif o in ("-t", "--step"):
step = a
elif o in ("-c", "--cluster"):
cluster = a
elif o in ("-u", "--colorused"):
colorused = a
else:
print "option:", o
usage()
BonnLogger.updateStatus(__bonn_logger_id__, 1)
sys.exit(2)
print cluster
#raw_input()
if not infile or night==None or not outfile or \
extcoeff==None or colcoeff==None or label==None:
#print infile, night, outfile, coeff, color
usage()
BonnLogger.updateStatus(__bonn_logger_id__, 1)
sys.exit(2)
data, airmass, color, magErr = readInput(infile)
solutions = photCalib(data, airmass, color, magErr, [24, extcoeff, colcoeff], sigmareject)
makePlots(data, airmass, color, outfile, solutions, label)
saveResults(outfile, solutions, step, sigmareject, cluster, colorused)
BonnLogger.updateStatus(__bonn_logger_id__, 0)
|
{
"content_hash": "9e97399d663c117fa01407762181a4c9",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 279,
"avg_line_length": 38.09480812641083,
"alnum_prop": 0.5478786442284902,
"repo_name": "deapplegate/wtgpipeline",
"id": "7b5822f20da4e54c0d06d0eae9719c6ab701370c",
"size": "17653",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "non_essentials/photo_abs.two.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "183"
},
{
"name": "C",
"bytes": "7161"
},
{
"name": "C++",
"bytes": "65083"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Perl",
"bytes": "38992"
},
{
"name": "Python",
"bytes": "13671330"
},
{
"name": "Roff",
"bytes": "48622"
},
{
"name": "Shell",
"bytes": "3637313"
},
{
"name": "XSLT",
"bytes": "54208"
}
],
"symlink_target": ""
}
|
from sublime_plugin import WindowCommand
from ..api import deviot
from ..libraries.tools import create_sketch, get_setting, save_setting
class DeviotNewSketchCommand(WindowCommand):
def run(self):
from ..libraries.I18n import I18n
_ = I18n().translate
caption = _('caption_new_sketch')
self.window.show_input_panel(caption, '', self.location, None, None)
def location(self, name):
self.name = name
deviot.folder_explorer(callback=self.on_done)
def on_done(self, path):
create_sketch(self.name, path)
save_setting('last_path', path)
|
{
"content_hash": "aaf75dd6069efe0408b9cdc726f05038",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 76,
"avg_line_length": 27.954545454545453,
"alnum_prop": 0.6650406504065041,
"repo_name": "gepd/Deviot",
"id": "81a795ca735d0d87f6b80acffebef0f4c92359d3",
"size": "615",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "commands/deviot_new_sketch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "260"
},
{
"name": "Python",
"bytes": "541555"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
from mock import Mock, patch
from c7n_mailer import utils
class FormatStruct(unittest.TestCase):
def test_formats_struct(self):
expected = '{\n "foo": "bar"\n}'
actual = utils.format_struct({'foo': 'bar'})
self.assertEqual(expected, actual)
class ResourceFormat(unittest.TestCase):
def test_efs(self):
self.assertEqual(
utils.resource_format(
{'Name': 'abc', 'FileSystemId': 'fsid', 'LifeCycleState': 'available'},
'efs'),
'name: abc id: fsid state: available')
def test_eip(self):
self.assertEqual(
utils.resource_format(
{'PublicIp': '8.8.8.8', 'Domain': 'vpc', 'AllocationId': 'eipxyz'},
'network-addr'),
'ip: 8.8.8.8 id: eipxyz scope: vpc')
def test_nat(self):
self.assertEqual(
utils.resource_format(
{'NatGatewayId': 'nat-xyz', 'State': 'available', 'VpcId': 'vpc-123'},
'nat-gateway'),
'id: nat-xyz state: available vpc: vpc-123')
def test_igw(self):
self.assertEqual(
utils.resource_format(
{'InternetGatewayId': 'igw-x', 'Attachments': []},
'internet-gateway'),
'id: igw-x attachments: 0')
def test_alb(self):
self.assertEqual(
utils.resource_format(
{'LoadBalancerArn':
'arn:aws:elasticloadbalancing:us-east-1:367930536793:'
'loadbalancer/app/dev/1234567890',
'AvailabilityZones': [], 'Scheme': 'internal'},
'app-elb'),
'arn: arn:aws:elasticloadbalancing:us-east-1:367930536793:'
'loadbalancer/app/dev/1234567890'
' zones: 0 scheme: internal')
class GetAwsUsernameFromEvent(unittest.TestCase):
# note principalId is very org/domain specific for federated?, it would be
# good to get confirmation from capone on this event / test.
CLOUDTRAIL_EVENT = {
'detail': {
'userIdentity': {
"type": "IAMUser",
"principalId": "AIDAJ45Q7YFFAREXAMPLE",
"arn": "arn:aws:iam::123456789012:user/michael_bolton",
"accountId": "123456789012",
"accessKeyId": "AKIAIOSFODNN7EXAMPLE",
"userName": "michael_bolton"
}
}
}
def test_get(self):
username = utils.get_aws_username_from_event(
Mock(), self.CLOUDTRAIL_EVENT
)
self.assertEqual(username, 'michael_bolton')
def test_get_username_none(self):
self.assertEqual(
utils.get_aws_username_from_event(Mock(), None),
None
)
def test_get_username_identity_none(self):
evt = {'detail': {}}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
None
)
def test_get_username_assumed_role(self):
evt = {
'detail': {
'userIdentity': {
'type': 'AssumedRole',
'arn': 'foo'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
'foo'
)
def test_get_username_assumed_role_instance(self):
evt = {
'detail': {
'userIdentity': {
'type': 'AssumedRole',
'arn': 'foo/i-12345678'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
None
)
def test_get_username_assumed_role_lambda(self):
evt = {
'detail': {
'userIdentity': {
'type': 'AssumedRole',
'arn': 'foo/awslambda'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
None
)
def test_get_username_assumed_role_colons(self):
evt = {
'detail': {
'userIdentity': {
'type': 'AssumedRole',
'arn': 'foo/bar:baz:blam'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
'baz:blam'
)
def test_get_username_iam(self):
evt = {
'detail': {
'userIdentity': {
'type': 'IAMUser',
'userName': 'bar'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
'bar'
)
def test_get_username_root(self):
evt = {
'detail': {
'userIdentity': {
'type': 'Root'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
None
)
def test_get_username_principalColon(self):
evt = {
'detail': {
'userIdentity': {
'type': 'foo',
'principalId': 'bar:baz'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
'baz'
)
def test_get_username_principal(self):
evt = {
'detail': {
'userIdentity': {
'type': 'foo',
'principalId': 'blam'
}
}
}
self.assertEqual(
utils.get_aws_username_from_event(Mock(), evt),
'blam'
)
class ProviderSelector(unittest.TestCase):
def test_get_providers(self):
self.assertEqual(utils.get_provider({'queue_url': 'asq://'}), utils.Providers.Azure)
self.assertEqual(utils.get_provider({'queue_url': 'sqs://'}), utils.Providers.AWS)
class DecryptTests(unittest.TestCase):
@patch('c7n_mailer.utils.kms_decrypt')
def test_kms_decrypt(self, kms_decrypt_mock):
utils.decrypt({'queue_url': 'aws', 'test': 'test'}, Mock(), Mock(), 'test')
kms_decrypt_mock.assert_called_once()
def test_decrypt_none(self):
self.assertEqual(utils.decrypt({'queue_url': 'aws'}, Mock(), Mock(), 'test'), None)
self.assertEqual(utils.decrypt({'queue_url': 'asq'}, Mock(), Mock(), 'test'), None)
|
{
"content_hash": "bce6d39d0bc9603fd04dc796fee98c8f",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 92,
"avg_line_length": 29.638392857142858,
"alnum_prop": 0.48425967766229855,
"repo_name": "ewbankkit/cloud-custodian",
"id": "2ecf88281f7bf868b9c6052c3375a8e03a7f94df",
"size": "6663",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/c7n_mailer/tests/test_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7986"
},
{
"name": "Go",
"bytes": "145643"
},
{
"name": "HTML",
"bytes": "31"
},
{
"name": "Makefile",
"bytes": "9857"
},
{
"name": "PowerShell",
"bytes": "1749"
},
{
"name": "Python",
"bytes": "4913354"
},
{
"name": "Shell",
"bytes": "7277"
}
],
"symlink_target": ""
}
|
import csv
import numpy as np
import matplotlib.pyplot as plt
with open('/Users/tunder/Dropbox/GenreProject/python/piketty/badvolids.txt', encoding = 'utf-8') as f:
badids = [x.rstrip() for x in f.readlines()]
alldistribution = dict()
targetdistribution = dict()
def pricesymbol(snippet):
if ' $ ' in snippet:
return True
elif ' £ ' in snippet:
return True
elif ' ¢ ' in snippet:
return True
elif ' £ ' in snippet:
return True
elif '22nd' in snippet or '22d' in snippet:
return True
elif '23rd' in snippet or '23d' in snippet:
return True
elif '2d' in snippet:
return True
elif '3d' in snippet:
return True
# elif '$' in snippet or '£' in snippet:
# words = snippet.split()
# for word in words:
# if '$' in word and len(word) < 3:
# return True
# elif '£' in word and len(word) < 3:
# return True
# return False
else:
return False
def special_check(testcondition, snippet):
if testcondition != 'nonmonetary':
return True
elif not pricesymbol(snippet):
return True
else:
return False
def increment_dict(key, dictionary):
if key in dictionary:
dictionary[key] += 1
else:
dictionary[key] = 1
def decrement_dict(key, dictionary):
if key in dictionary:
dictionary[key] -= 1
else:
dictionary[key] = 0
testcondition = input("Context to check? ")
filepath = "codedsnippets.tsv"
with open(filepath, encoding = 'utf-8') as f:
filelines = f.readlines()
idswerrors = dict()
toggle = True
for line in filelines:
line = line.rstrip()
fields = line.split('\t')
date = int(fields[0])
volid = fields[1]
if volid in badids or ("000" + volid) in badids:
continue
currency = fields[3]
facevalue = float(fields[4])
decade = int(date/10) * 10
increment_dict(decade, alldistribution)
if volid in idswerrors:
decrement_dict(volid, idswerrors)
context = fields[5]
if context == testcondition and special_check(context, fields[6]):
increment_dict(decade, targetdistribution)
increment_dict(volid, idswerrors)
if context != 'nonmonetary' and pricesymbol(fields[6]):
print('false negative')
outpath = testcondition + '_dist.csv'
numdecades = len(alldistribution)
x = np.zeros(numdecades)
y = np.zeros(numdecades)
with open(outpath, mode = 'w', encoding = 'utf-8') as f:
writer = csv.writer(f)
writer.writerow(['year', 'allsnips', 'errorsnips'])
idx = 0
for year, allsnips in alldistribution.items():
if year in targetdistribution:
target = targetdistribution[year]
else:
target = 0
row = [year, allsnips, target]
x[idx] = year
y[idx] = target / allsnips
idx += 1
writer.writerow(row)
plt.scatter(x, y)
plt.show()
with open('falsepositives.tsv', mode='w', encoding = 'utf-8') as f:
for key, value in idswerrors.items():
percent = (value + 1) / 4
outline = key + '\t' + str(percent)+ '\n'
f.write(outline)
|
{
"content_hash": "8eeaa24f1f2eb0d3c2fddcc78cfa19c9",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 102,
"avg_line_length": 25.608,
"alnum_prop": 0.6004373633239612,
"repo_name": "tedunderwood/GenreProject",
"id": "f804a420f62cd779522f04aa2ce06ef6ff196cf6",
"size": "3528",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/piketty/manualcoding/context_distribution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "187389"
},
{
"name": "Python",
"bytes": "645172"
},
{
"name": "R",
"bytes": "34870"
}
],
"symlink_target": ""
}
|
import os
import random
import uuid
from keystone.common.sql import migration
from keystone import config
from keystone import contrib
from keystone.openstack.common import importutils
from keystone.openstack.common import jsonutils
from keystone.openstack.common import log
from keystone.tests import mapping_fixtures
from keystone.tests import test_v3
CONF = config.CONF
LOG = log.getLogger(__name__)
def dummy_validator(*args, **kwargs):
pass
class FederationTests(test_v3.RestfulTestCase):
EXTENSION_NAME = 'federation'
EXTENSION_TO_ADD = 'federation_extension'
def __init__(self, *args, **kwargs):
super(FederationTests, self).__init__(*args, **kwargs)
def setup_database(self):
super(FederationTests, self).setup_database()
package_name = "%s.%s.migrate_repo" % (contrib.__name__,
self.EXTENSION_NAME)
package = importutils.import_module(package_name)
self.repo_path = os.path.abspath(os.path.dirname(package.__file__))
migration.db_version_control(version=None, repo_path=self.repo_path)
migration.db_sync(version=None, repo_path=self.repo_path)
class FederatedIdentityProviderTests(FederationTests):
"""A test class for Identity Providers."""
idp_keys = ['description', 'enabled']
default_body = {'description': None, 'enabled': True}
def base_url(self, suffix=None):
if suffix is not None:
return '/OS-FEDERATION/identity_providers/' + str(suffix)
return '/OS-FEDERATION/identity_providers'
def _fetch_attribute_from_response(self, resp, parameter,
assert_is_not_none=True):
"""Fetch single attribute from TestResponse object."""
result = resp.result.get(parameter, None)
if assert_is_not_none:
self.assertIsNotNone(result)
return result
def _fetch_attributes_from_response(self, resp, parameters=[],
assert_is_not_none=True):
"""Fetch parameters from the TestResponse object."""
result = dict()
kwargs = {'assert_is_not_none': assert_is_not_none}
for parameter in parameters:
value = self._fetch_attribute_from_response(resp, parameter,
**kwargs)
result[parameter] = value
return result
def _create_and_decapsulate_response(self, body=None):
"""Create IdP and fetch it's random id along with entity."""
default_resp = self._create_default_idp(body=body)
idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
self.assertIsNotNone(idp)
idp_id = idp.get('id')
return (idp_id, idp)
def _get_idp(self, idp_id):
"""Fetch IdP entity based on it's id."""
url = self.base_url(suffix=idp_id)
resp = self.get(url)
return resp
def _create_default_idp(self, body=None):
"""Create default IdP."""
url = self.base_url(suffix=uuid.uuid4().hex)
if body is None:
body = self._http_idp_input()
resp = self.put(url, body={'identity_provider': body},
expected_status=201)
return resp
def _http_idp_input(self, **kwargs):
"""Create default input for IdP data."""
body = None
if 'body' not in kwargs:
body = self.default_body.copy()
body['description'] = uuid.uuid4().hex
else:
body = kwargs['body']
return body
def _assign_protocol_to_idp(self, idp_id=None, proto=None, url=None,
mapping_id=None, validate=True, **kwargs):
if url is None:
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
if idp_id is None:
idp_id, _ = self._create_and_decapsulate_response()
if proto is None:
proto = uuid.uuid4().hex
if mapping_id is None:
mapping_id = uuid.uuid4().hex
body = {'mapping_id': mapping_id}
url = url % {'idp_id': idp_id, 'protocol_id': proto}
resp = self.put(url, body={'protocol': body}, **kwargs)
if validate:
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': mapping_id})
return (resp, idp_id, proto)
def _get_protocol(self, idp_id, protocol_id):
url = "%s/protocols/%s" % (idp_id, protocol_id)
url = self.base_url(suffix=url)
r = self.get(url)
return r
def test_create_idp(self):
"""Creates the IdentityProvider entity."""
keys_to_check = self.idp_keys
body = self._http_idp_input()
resp = self._create_default_idp(body=body)
self.assertValidResponse(resp, 'identity_provider', dummy_validator,
keys_to_check=keys_to_check,
ref=body)
def test_list_idps(self, iterations=5):
"""Lists all available IdentityProviders.
This test collects ids of created IdPs and
intersects it with the list of all available IdPs.
List of all IdPs can be a superset of IdPs created in this test,
because other tests also create IdPs.
"""
def get_id(resp):
r = self._fetch_attribute_from_response(resp,
'identity_provider')
return r.get('id')
ids = []
for _ in xrange(iterations):
id = get_id(self._create_default_idp())
ids.append(id)
ids = set(ids)
keys_to_check = self.idp_keys
url = self.base_url()
resp = self.get(url)
self.assertValidListResponse(resp, 'identity_providers',
dummy_validator,
keys_to_check=keys_to_check)
entities = self._fetch_attribute_from_response(resp,
'identity_providers')
entities_ids = set([e['id'] for e in entities])
ids_intersection = entities_ids.intersection(ids)
self.assertEqual(ids_intersection, ids)
def test_check_idp_uniqueness(self):
"""Add same IdP twice.
Expect HTTP 409 code for the latter call.
"""
url = self.base_url(suffix=uuid.uuid4().hex)
body = self._http_idp_input()
self.put(url, body={'identity_provider': body},
expected_status=201)
self.put(url, body={'identity_provider': body},
expected_status=409)
def test_get_idp(self):
"""Create and later fetch IdP."""
body = self._http_idp_input()
default_resp = self._create_default_idp(body=body)
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
resp = self.get(url)
self.assertValidResponse(resp, 'identity_provider',
dummy_validator, keys_to_check=body.keys(),
ref=body)
def test_get_nonexisting_idp(self):
"""Fetch nonexisting IdP entity.
Expected HTTP 404 status code.
"""
idp_id = uuid.uuid4().hex
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.get(url, expected_status=404)
def test_delete_existing_idp(self):
"""Create and later delete IdP.
Expect HTTP 404 for the GET IdP call.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
url = self.base_url(suffix=idp_id)
self.delete(url)
self.get(url, expected_status=404)
def test_delete_nonexisting_idp(self):
"""Delete nonexisting IdP.
Expect HTTP 404 for the GET IdP call.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
self.delete(url, expected_status=404)
def test_update_idp_mutable_attributes(self):
"""Update IdP's mutable parameters."""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
url = self.base_url(suffix=idp_id)
self.assertIsNotNone(idp_id)
_enabled = not default_idp.get('enabled')
body = {'description': uuid.uuid4().hex, 'enabled': _enabled}
body = {'identity_provider': body}
resp = self.patch(url, body=body)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
body = body['identity_provider']
for key in body.keys():
self.assertEqual(body[key], updated_idp.get(key))
resp = self.get(url)
updated_idp = self._fetch_attribute_from_response(resp,
'identity_provider')
for key in body.keys():
self.assertEqual(body[key], updated_idp.get(key))
def test_update_idp_immutable_attributes(self):
"""Update IdP's immutable parameters.
Expect HTTP 403 code.
"""
default_resp = self._create_default_idp()
default_idp = self._fetch_attribute_from_response(default_resp,
'identity_provider')
idp_id = default_idp.get('id')
self.assertIsNotNone(idp_id)
body = self._http_idp_input()
body['id'] = uuid.uuid4().hex
body['protocols'] = [uuid.uuid4().hex, uuid.uuid4().hex]
url = self.base_url(suffix=idp_id)
self.patch(url, body={'identity_provider': body}, expected_status=403)
def test_update_nonexistent_idp(self):
"""Update nonexistent IdP
Expect HTTP 404 code.
"""
idp_id = uuid.uuid4().hex
url = self.base_url(suffix=idp_id)
body = self._http_idp_input()
body['enabled'] = False
body = {'identity_provider': body}
self.patch(url, body=body, expected_status=404)
def test_assign_protocol_to_idp(self):
"""Assign a protocol to existing IdP."""
self._assign_protocol_to_idp(expected_status=201)
def test_protocol_composite_pk(self):
"""Test whether Keystone let's add two entities with identical
names, however attached to diferent IdPs.
1. Add IdP and assign it protocol with predefined name
2. Add another IdP and assign it a protocol with same name.
Expect HTTP 201 code
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': 201}
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
def test_protocol_idp_pk_uniqueness(self):
"""Test whether Keystone checks for unique idp/protocol values.
Add same protocol twice, expect Keystone to reject a latter call and
return HTTP 409 code.
"""
url = self.base_url(suffix='%(idp_id)s/protocols/%(protocol_id)s')
kwargs = {'expected_status': 201}
resp, idp_id, proto = self._assign_protocol_to_idp(proto='saml2',
url=url, **kwargs)
kwargs = {'expected_status': 409}
resp, idp_id, proto = self._assign_protocol_to_idp(idp_id=idp_id,
proto='saml2',
validate=False,
url=url, **kwargs)
def test_assign_protocol_to_nonexistent_idp(self):
"""Assign protocol to IdP that doesn't exist.
Expect HTTP 404 code.
"""
idp_id = uuid.uuid4().hex
kwargs = {'expected_status': 404}
self._assign_protocol_to_idp(proto='saml2',
idp_id=idp_id,
validate=False,
**kwargs)
def test_get_protocol(self):
"""Create and later fetch protocol tied to IdP."""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')['id']
url = "%s/protocols/%s" % (idp_id, proto_id)
url = self.base_url(suffix=url)
resp = self.get(url)
reference = {'id': proto_id}
self.assertValidResponse(resp, 'protocol',
dummy_validator,
keys_to_check=reference.keys(),
ref=reference)
def test_list_protocols(self):
"""Create set of protocols and later list them.
Compare input and output id sets.
"""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
iterations = random.randint(0, 16)
protocol_ids = []
for _ in xrange(iterations):
resp, _, proto = self._assign_protocol_to_idp(idp_id=idp_id,
expected_status=201)
proto_id = self._fetch_attribute_from_response(resp, 'protocol')
proto_id = proto_id['id']
protocol_ids.append(proto_id)
url = "%s/protocols" % idp_id
url = self.base_url(suffix=url)
resp = self.get(url)
self.assertValidListResponse(resp, 'protocols',
dummy_validator,
keys_to_check=['id'])
entities = self._fetch_attribute_from_response(resp, 'protocols')
entities = set([entity['id'] for entity in entities])
protocols_intersection = entities.intersection(protocol_ids)
self.assertEqual(protocols_intersection, set(protocol_ids))
def test_update_protocols_attribute(self):
"""Update protocol's attribute."""
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
new_mapping_id = uuid.uuid4().hex
url = "%s/protocols/%s" % (idp_id, proto)
url = self.base_url(suffix=url)
body = {'mapping_id': new_mapping_id}
resp = self.patch(url, body={'protocol': body})
self.assertValidResponse(resp, 'protocol', dummy_validator,
keys_to_check=['id', 'mapping_id'],
ref={'id': proto,
'mapping_id': new_mapping_id}
)
def test_delete_protocol(self):
"""Delete protocol.
Expect HTTP 404 code for the GET call after the protocol is deleted.
"""
url = self.base_url(suffix='/%(idp_id)s/'
'protocols/%(protocol_id)s')
resp, idp_id, proto = self._assign_protocol_to_idp(expected_status=201)
url = url % {'idp_id': idp_id,
'protocol_id': proto}
self.delete(url)
self.get(url, expected_status=404)
class MappingCRUDTests(FederationTests):
"""A class for testing CRUD operations for Mappings."""
MAPPING_URL = '/OS-FEDERATION/mappings/'
def assertValidMappingListResponse(self, resp, *args, **kwargs):
return self.assertValidListResponse(
resp,
'mappings',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMappingResponse(self, resp, *args, **kwargs):
return self.assertValidResponse(
resp,
'mapping',
self.assertValidMapping,
keys_to_check=[],
*args,
**kwargs)
def assertValidMapping(self, entity, ref=None):
self.assertIsNotNone(entity.get('id'))
self.assertIsNotNone(entity.get('rules'))
if ref:
self.assertEqual(jsonutils.loads(entity['rules']), ref['rules'])
return entity
def _create_default_mapping_entry(self):
url = self.MAPPING_URL + uuid.uuid4().hex
resp = self.put(url,
body={'mapping': mapping_fixtures.MAPPING_LARGE},
expected_status=201)
return resp
def _get_id_from_response(self, resp):
r = resp.result.get('mapping')
return r.get('id')
def test_mapping_create(self):
resp = self._create_default_mapping_entry()
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_list(self):
url = self.MAPPING_URL
self._create_default_mapping_entry()
resp = self.get(url)
entities = resp.result.get('mappings')
self.assertIsNotNone(entities)
self.assertResponseStatus(resp, 200)
self.assertValidListLinks(resp.result.get('links'))
self.assertEqual(len(entities), 1)
def test_mapping_delete(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': str(mapping_id)}
resp = self.delete(url)
self.assertResponseStatus(resp, 204)
self.get(url, expected_status=404)
def test_mapping_get(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_LARGE)
def test_mapping_update(self):
url = self.MAPPING_URL + '%(mapping_id)s'
resp = self._create_default_mapping_entry()
mapping_id = self._get_id_from_response(resp)
url = url % {'mapping_id': mapping_id}
resp = self.patch(url,
body={'mapping': mapping_fixtures.MAPPING_SMALL})
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
resp = self.get(url)
self.assertValidMappingResponse(resp, mapping_fixtures.MAPPING_SMALL)
def test_delete_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.delete(url, expected_status=404)
def test_get_mapping_dne(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.get(url, expected_status=404)
def test_create_mapping_bad_requirements(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_BAD_REQ})
def test_create_mapping_no_rules(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_NO_RULES})
def test_create_mapping_no_remote_objects(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_NO_REMOTE})
def test_create_mapping_bad_value(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_BAD_VALUE})
def test_create_mapping_missing_local(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_MISSING_LOCAL})
def test_create_mapping_missing_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_MISSING_TYPE})
def test_create_mapping_wrong_type(self):
url = self.MAPPING_URL + uuid.uuid4().hex
self.put(url, expected_status=400,
body={'mapping': mapping_fixtures.MAPPING_WRONG_TYPE})
|
{
"content_hash": "dd51cd5ed60f804560595f54d6c71c55",
"timestamp": "",
"source": "github",
"line_count": 549,
"max_line_length": 79,
"avg_line_length": 37.8816029143898,
"alnum_prop": 0.5591191037168822,
"repo_name": "dsiddharth/access-keys",
"id": "107a6045e7771047cb2631294cf6d272b6fd2074",
"size": "21390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keystone/tests/test_v3_federation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "16002"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "2619408"
},
{
"name": "Shell",
"bytes": "11206"
}
],
"symlink_target": ""
}
|
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from pandas import CategoricalIndex, Index, MultiIndex
from pandas.compat import range
def assert_matching(actual, expected, check_dtype=False):
# avoid specifying internal representation
# as much as possible
assert len(actual) == len(expected)
for act, exp in zip(actual, expected):
act = np.asarray(act)
exp = np.asarray(exp)
tm.assert_numpy_array_equal(act, exp, check_dtype=check_dtype)
def test_get_level_number_integer(idx):
idx.names = [1, 0]
assert idx._get_level_number(1) == 0
assert idx._get_level_number(0) == 1
pytest.raises(IndexError, idx._get_level_number, 2)
tm.assert_raises_regex(KeyError, 'Level fourth not found',
idx._get_level_number, 'fourth')
def test_get_level_values(idx):
result = idx.get_level_values(0)
expected = Index(['foo', 'foo', 'bar', 'baz', 'qux', 'qux'],
name='first')
tm.assert_index_equal(result, expected)
assert result.name == 'first'
result = idx.get_level_values('first')
expected = idx.get_level_values(0)
tm.assert_index_equal(result, expected)
# GH 10460
index = MultiIndex(
levels=[CategoricalIndex(['A', 'B']),
CategoricalIndex([1, 2, 3])],
labels=[np.array([0, 0, 0, 1, 1, 1]),
np.array([0, 1, 2, 0, 1, 2])])
exp = CategoricalIndex(['A', 'A', 'A', 'B', 'B', 'B'])
tm.assert_index_equal(index.get_level_values(0), exp)
exp = CategoricalIndex([1, 2, 3, 1, 2, 3])
tm.assert_index_equal(index.get_level_values(1), exp)
def test_get_value_duplicates():
index = MultiIndex(levels=[['D', 'B', 'C'],
[0, 26, 27, 37, 57, 67, 75, 82]],
labels=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2],
[1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
names=['tag', 'day'])
assert index.get_loc('D') == slice(0, 3)
with pytest.raises(KeyError):
index._engine.get_value(np.array([]), 'D')
def test_get_level_values_all_na():
# GH 17924 when level entirely consists of nan
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan], dtype=np.float64)
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1], dtype=object)
tm.assert_index_equal(result, expected)
def test_get_level_values_int_with_na():
# GH 17924
arrays = [['a', 'b', 'b'], [1, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([1, np.nan, 2])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], [np.nan, np.nan, 2]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = Index([np.nan, np.nan, 2])
tm.assert_index_equal(result, expected)
def test_get_level_values_na():
arrays = [[np.nan, np.nan, np.nan], ['a', np.nan, 1]]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([np.nan, np.nan, np.nan])
tm.assert_index_equal(result, expected)
result = index.get_level_values(1)
expected = pd.Index(['a', np.nan, 1])
tm.assert_index_equal(result, expected)
arrays = [['a', 'b', 'b'], pd.DatetimeIndex([0, 1, pd.NaT])]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(1)
expected = pd.DatetimeIndex([0, 1, pd.NaT])
tm.assert_index_equal(result, expected)
arrays = [[], []]
index = pd.MultiIndex.from_arrays(arrays)
result = index.get_level_values(0)
expected = pd.Index([], dtype=object)
tm.assert_index_equal(result, expected)
def test_set_name_methods(idx, index_names):
# so long as these are synonyms, we don't need to test set_names
assert idx.rename == idx.set_names
new_names = [name + "SUFFIX" for name in index_names]
ind = idx.set_names(new_names)
assert idx.names == index_names
assert ind.names == new_names
with tm.assert_raises_regex(ValueError, "^Length"):
ind.set_names(new_names + new_names)
new_names2 = [name + "SUFFIX2" for name in new_names]
res = ind.set_names(new_names2, inplace=True)
assert res is None
assert ind.names == new_names2
# set names for specific level (# GH7792)
ind = idx.set_names(new_names[0], level=0)
assert idx.names == index_names
assert ind.names == [new_names[0], index_names[1]]
res = ind.set_names(new_names2[0], level=0, inplace=True)
assert res is None
assert ind.names == [new_names2[0], index_names[1]]
# set names for multiple levels
ind = idx.set_names(new_names, level=[0, 1])
assert idx.names == index_names
assert ind.names == new_names
res = ind.set_names(new_names2, level=[0, 1], inplace=True)
assert res is None
assert ind.names == new_names2
def test_set_levels_labels_directly(idx):
# setting levels/labels directly raises AttributeError
levels = idx.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
labels = idx.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
with pytest.raises(AttributeError):
idx.levels = new_levels
with pytest.raises(AttributeError):
idx.labels = new_labels
def test_set_levels(idx):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
levels = idx.levels
new_levels = [[lev + 'a' for lev in level] for level in levels]
# level changing [w/o mutation]
ind2 = idx.set_levels(new_levels)
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# level changing [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
# level changing specific level [w/o mutation]
ind2 = idx.set_levels(new_levels[0], level=0)
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(idx.levels, levels)
ind2 = idx.set_levels(new_levels[1], level=1)
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(idx.levels, levels)
# level changing multiple levels [w/o mutation]
ind2 = idx.set_levels(new_levels, level=[0, 1])
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# level changing specific level [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [new_levels[0], levels[1]])
assert_matching(idx.levels, levels)
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, [levels[0], new_levels[1]])
assert_matching(idx.levels, levels)
# level changing multiple levels [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_levels(new_levels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.levels, new_levels)
assert_matching(idx.levels, levels)
# illegal level changing should not change levels
# GH 13754
original_index = idx.copy()
for inplace in [True, False]:
with tm.assert_raises_regex(ValueError, "^On"):
idx.set_levels(['c'], level=0, inplace=inplace)
assert_matching(idx.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(ValueError, "^On"):
idx.set_labels([0, 1, 2, 3, 4, 5], level=0,
inplace=inplace)
assert_matching(idx.labels, original_index.labels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Levels"):
idx.set_levels('c', level=0, inplace=inplace)
assert_matching(idx.levels, original_index.levels,
check_dtype=True)
with tm.assert_raises_regex(TypeError, "^Labels"):
idx.set_labels(1, level=0, inplace=inplace)
assert_matching(idx.labels, original_index.labels,
check_dtype=True)
def test_set_labels(idx):
# side note - you probably wouldn't want to use levels and labels
# directly like this - but it is possible.
labels = idx.labels
major_labels, minor_labels = labels
major_labels = [(x + 1) % 3 for x in major_labels]
minor_labels = [(x + 1) % 1 for x in minor_labels]
new_labels = [major_labels, minor_labels]
# label changing [w/o mutation]
ind2 = idx.set_labels(new_labels)
assert_matching(ind2.labels, new_labels)
assert_matching(idx.labels, labels)
# label changing [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_labels(new_labels, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
# label changing specific level [w/o mutation]
ind2 = idx.set_labels(new_labels[0], level=0)
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(idx.labels, labels)
ind2 = idx.set_labels(new_labels[1], level=1)
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(idx.labels, labels)
# label changing multiple levels [w/o mutation]
ind2 = idx.set_labels(new_labels, level=[0, 1])
assert_matching(ind2.labels, new_labels)
assert_matching(idx.labels, labels)
# label changing specific level [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_labels(new_labels[0], level=0, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [new_labels[0], labels[1]])
assert_matching(idx.labels, labels)
ind2 = idx.copy()
inplace_return = ind2.set_labels(new_labels[1], level=1, inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, [labels[0], new_labels[1]])
assert_matching(idx.labels, labels)
# label changing multiple levels [w/ mutation]
ind2 = idx.copy()
inplace_return = ind2.set_labels(new_labels, level=[0, 1],
inplace=True)
assert inplace_return is None
assert_matching(ind2.labels, new_labels)
assert_matching(idx.labels, labels)
# label changing for levels of different magnitude of categories
ind = pd.MultiIndex.from_tuples([(0, i) for i in range(130)])
new_labels = range(129, -1, -1)
expected = pd.MultiIndex.from_tuples(
[(0, i) for i in new_labels])
# [w/o mutation]
result = ind.set_labels(labels=new_labels, level=1)
assert result.equals(expected)
# [w/ mutation]
result = ind.copy()
result.set_labels(labels=new_labels, level=1, inplace=True)
assert result.equals(expected)
def test_set_levels_labels_names_bad_input(idx):
levels, labels = idx.levels, idx.labels
names = idx.names
with tm.assert_raises_regex(ValueError, 'Length of levels'):
idx.set_levels([levels[0]])
with tm.assert_raises_regex(ValueError, 'Length of labels'):
idx.set_labels([labels[0]])
with tm.assert_raises_regex(ValueError, 'Length of names'):
idx.set_names([names[0]])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
idx.set_levels(levels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
idx.set_labels(labels[0])
# shouldn't scalar data error, instead should demand list-like
with tm.assert_raises_regex(TypeError, 'list-like'):
idx.set_names(names[0])
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
idx.set_levels(levels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
idx.set_levels(levels, level=0)
# should have equal lengths
with tm.assert_raises_regex(TypeError, 'list of lists-like'):
idx.set_labels(labels[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'list-like'):
idx.set_labels(labels, level=0)
# should have equal lengths
with tm.assert_raises_regex(ValueError, 'Length of names'):
idx.set_names(names[0], level=[0, 1])
with tm.assert_raises_regex(TypeError, 'Names must be a'):
idx.set_names(names, level=0)
@pytest.mark.parametrize('inplace', [True, False])
def test_set_names_with_nlevel_1(inplace):
# GH 21149
# Ensure that .set_names for MultiIndex with
# nlevels == 1 does not raise any errors
expected = pd.MultiIndex(levels=[[0, 1]],
labels=[[0, 1]],
names=['first'])
m = pd.MultiIndex.from_product([[0, 1]])
result = m.set_names('first', level=0, inplace=inplace)
if inplace:
result = m
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize('ordered', [True, False])
def test_set_levels_categorical(ordered):
# GH13854
index = MultiIndex.from_arrays([list("xyzx"), [0, 1, 2, 3]])
cidx = CategoricalIndex(list("bac"), ordered=ordered)
result = index.set_levels(cidx, 0)
expected = MultiIndex(levels=[cidx, [0, 1, 2, 3]],
labels=index.labels)
tm.assert_index_equal(result, expected)
result_lvl = result.get_level_values(0)
expected_lvl = CategoricalIndex(list("bacb"),
categories=cidx.categories,
ordered=cidx.ordered)
tm.assert_index_equal(result_lvl, expected_lvl)
def test_set_value_keeps_names():
# motivating example from #3742
lev1 = ['hans', 'hans', 'hans', 'grethe', 'grethe', 'grethe']
lev2 = ['1', '2', '3'] * 2
idx = pd.MultiIndex.from_arrays([lev1, lev2], names=['Name', 'Number'])
df = pd.DataFrame(
np.random.randn(6, 4),
columns=['one', 'two', 'three', 'four'],
index=idx)
df = df.sort_index()
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
df.at[('grethe', '4'), 'one'] = 99.34
assert df._is_copy is None
assert df.index.names == ('Name', 'Number')
|
{
"content_hash": "d0e107a613766f4e36d3fd32f25995eb",
"timestamp": "",
"source": "github",
"line_count": 413,
"max_line_length": 75,
"avg_line_length": 35.73365617433414,
"alnum_prop": 0.6271852554546686,
"repo_name": "cython-testbed/pandas",
"id": "99ab54a83636c9c7c50ed88462051a685d6f0cc2",
"size": "14784",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/indexes/multi/test_get_set.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4907"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14136208"
},
{
"name": "Shell",
"bytes": "27731"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
'''
Module: player
Author: David Frye
Description: Contains the Player class.
'''
class Player:
'''
'''
def __init__(self):
'''
'''
return
|
{
"content_hash": "96cb1251a52ff5ec9fd8fac879ca08ca",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 39,
"avg_line_length": 10,
"alnum_prop": 0.5866666666666667,
"repo_name": "DFrye333/DynamicMaze",
"id": "32ae43250819ff0921859dd79242f798df68dd3c",
"size": "150",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "player.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "37734"
}
],
"symlink_target": ""
}
|
from colorama import Fore
import math
import os
import requests
import subprocess
import tqdm
from dotgen import hashing
rank = 0
def handle(output_dir, config):
for download in config:
print(Fore.WHITE + "download: " + download + Fore.RESET)
cfg = config[download]
download_path = os.path.join(output_dir, cfg["path"])
hash_cfg = cfg["hash"]
if os.path.exists(download_path):
hash = hashing.hash_file(download_path)
if hash != hash_cfg:
os.remove(download_path)
continue
dir = os.path.dirname(download_path)
if not os.path.exists(dir):
os.makedirs(dir)
req = requests.get(cfg["url"], stream=True)
size = int(req.headers.get("content-length"))
chunk_size = 2048
with open(download_path, "wb") as fhandle:
with tqdm.tqdm(total=size) as pbar:
for chunk in req.iter_content(chunk_size=chunk_size):
out = fhandle.write(chunk)
if out < 0:
out = 0
pbar.update(out)
hash = hashing.hash_file(download_path)
if hash != hash_cfg:
os.remove(download_path)
continue
if "chmod" in cfg:
subprocess.call(["chmod", str(cfg["chmod"]), download_path])
print(Fore.WHITE + "done\n" + Fore.RESET)
|
{
"content_hash": "9af30ac66be603bd2020126be2086c38",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 72,
"avg_line_length": 28.46,
"alnum_prop": 0.5558678847505271,
"repo_name": "f-koehler/dotgen",
"id": "b0fd12f147d9e8b3d5442dcb63f1edb53fd1df4e",
"size": "1447",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dotgen/plugins/download.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14804"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
setup(
name="double_down",
version="1.0.1",
author="Stephen Melnicki",
author_email="smelnicki3@gmail.com",
packages=find_packages(),
description="A silly example of a python decorator",
long_description=open("README.rst").read(),
keywords="sample decorators",
license="MIT (See LICENSE.rst)",
url="https://github.com/smelnicki/double_down",
)
|
{
"content_hash": "d79890ad5828040df3174dc249f2323d",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 56,
"avg_line_length": 30.642857142857142,
"alnum_prop": 0.682983682983683,
"repo_name": "smelnicki/double_down",
"id": "8998465d5a729b5917b3633ad29081064851996f",
"size": "429",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1194"
}
],
"symlink_target": ""
}
|
import typing as t
from datetime import timedelta
import pytest
from pycroft.lib import user as lib_user
from pycroft.model.facilities import Room
from pycroft.model.task import Task, UserTask, TaskStatus, TaskType
from pycroft.model.task_serialization import UserMoveParams
from pycroft.model.user import User
from tests import factories
from tests.assertions import assert_unchanged
from tests.lib.user.task_helpers import create_task_and_execute
class TestUserMove:
@pytest.fixture(scope="class")
def subnet(self, class_session):
return factories.SubnetFactory.create()
@pytest.fixture(scope="class")
def user(self, class_session, subnet) -> User:
return factories.UserFactory(
with_host=True,
room__patched_with_subnet=True,
room__patch_ports__switch_port__default_vlans__subnets=[subnet]
)
@pytest.fixture(scope="class")
def new_room_other_building(self, class_session) -> Room:
return factories.RoomFactory(patched_with_subnet=True)
@pytest.fixture(scope="class")
def new_room_same_building(self, class_session, user, subnet) -> Room:
return factories.RoomFactory(
building=user.building,
patched_with_subnet=True,
patch_ports__switch_port__default_vlans__subnets=[subnet],
)
def test_move_scheduling(
self, session, utcnow, user, processor, new_room_other_building
):
when = utcnow + timedelta(days=1)
lib_user.move(
user,
building_id=new_room_other_building.building.id,
level=new_room_other_building.level,
room_number=new_room_other_building.number,
processor=processor,
when=when,
)
tasks = session.query(Task).all()
assert len(tasks) == 1
[task] = tasks
assert isinstance(task, UserTask)
assert task.user == user
assert task.parameters == UserMoveParams(
building_id=new_room_other_building.building.id,
level=new_room_other_building.level,
room_number=new_room_other_building.number,
)
def test_moves_into_same_room(self, session, user, processor):
old_room = user.room
with pytest.raises(AssertionError):
lib_user.move(
user, old_room.building.id, old_room.level, old_room.number, processor
)
def test_moves_into_other_building(
self, session, user, processor, new_room_other_building
):
lib_user.move(
user,
new_room_other_building.building.id,
new_room_other_building.level,
new_room_other_building.number,
processor,
)
assert user.room == new_room_other_building
assert user.hosts[0].room == new_room_other_building
# TODO test for changing ip
class TestMoveImpl:
@pytest.fixture(scope="class")
def user(self, class_session, config) -> User:
return factories.UserFactory.create(
with_membership=True,
membership__group=config.member_group,
with_host=True,
)
@pytest.fixture(scope="class")
def old_room(self, user):
return user.room
@pytest.fixture(scope="class")
def new_room(self, class_session) -> Room:
room = factories.RoomFactory.create()
class_session.flush()
return room
@pytest.fixture(scope="class")
def full_params(self, new_room) -> dict[str]:
return {
"level": new_room.level,
"building_id": new_room.building_id,
"room_number": new_room.number,
}
def test_successful_move_execution(self, session, user, new_room, full_params):
task = create_task_and_execute(TaskType.USER_MOVE, user, full_params)
assert task.status == TaskStatus.EXECUTED
assert user.room == new_room
@pytest.mark.parametrize(
"param_keys, error_needle",
(
(("building_id", "room_number"), "level"),
(("level", "room_number"), "building_id"),
(("level", "building_id"), "room_number"),
),
)
def test_all_params_required(
self,
session,
user,
new_room,
full_params,
param_keys: t.Iterable[str],
error_needle: str,
):
params = {k: v for k, v in full_params.items() if k in param_keys}
with assert_unchanged(lambda: user.room):
task = create_task_and_execute(TaskType.USER_MOVE, user, params)
assert task.status == TaskStatus.FAILED
assert len(task.errors) == 1
[error] = task.errors
assert error_needle in error.lower()
|
{
"content_hash": "512d3abf70ffedd2e73b64894c5d120e",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 86,
"avg_line_length": 33.702127659574465,
"alnum_prop": 0.6132154882154882,
"repo_name": "agdsn/pycroft",
"id": "105b6f6ee1dc52d50ea2b118610913ea9b474ffc",
"size": "4752",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/lib/user/test_move.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10320"
},
{
"name": "Dockerfile",
"bytes": "3341"
},
{
"name": "HTML",
"bytes": "124781"
},
{
"name": "JavaScript",
"bytes": "74707"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1172012"
},
{
"name": "Shell",
"bytes": "13660"
},
{
"name": "TypeScript",
"bytes": "5231"
}
],
"symlink_target": ""
}
|
"""Base Modin Dataframe classes related to its partitioning."""
|
{
"content_hash": "be342f7e2f0ad5a34d16aa664216b77a",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 63,
"avg_line_length": 64,
"alnum_prop": 0.765625,
"repo_name": "modin-project/modin",
"id": "a7992787753cd627748b9d74af1f67ab4d347f25",
"size": "847",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modin/core/dataframe/base/partitioning/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2330"
},
{
"name": "Python",
"bytes": "3914783"
},
{
"name": "Shell",
"bytes": "2377"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('nomi', '0135_auto_20170806_1111'),
('nomi', '0135_auto_20170806_1015'),
]
operations = [
]
|
{
"content_hash": "0f08ef68fdfcc2611de16bb03f901346",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 44,
"avg_line_length": 18.5,
"alnum_prop": 0.6254826254826255,
"repo_name": "SummerCamp17/Gymkhana-Nominations",
"id": "901fb06cac66f00f30208b01978268567bef8151",
"size": "332",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nomi/migrations/0136_merge_20170806_1143.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13871"
},
{
"name": "HTML",
"bytes": "187973"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "249674"
}
],
"symlink_target": ""
}
|
"""This module contains test objects with unexpected __name__ attributes.
It is used for testing aeta.logic.
"""
__author__ = 'jacobltaylor@google.com (Jacob Taylor)'
import unittest
# Change the module's __name__. The module's __name__ no longer starts with
# 'test_'; nevertheless, it should be included in the test suite because it was
# imported as test_badnames.
__name__ = 'module_name'
class ClassWithDifferentModule(unittest.TestCase):
def test_method(self):
pass
ClassWithDifferentModule.__module__ = 'class_module'
class ClassWithDifferentName1(unittest.TestCase):
def test_method(self):
pass
class ClassWithDifferentName2(unittest.TestCase):
def test_method(self):
pass
# Make the classes be named the same thing. This could happen due to e.g.
# using the same decorator that doesn't set __name__ properly.
ClassWithDifferentName1.__name__ = 'ClassName'
ClassWithDifferentName2.__name__ = 'ClassName'
class ClassWithDifferentMethodNames(unittest.TestCase):
def test_method1(self):
pass
def test_method2(self):
pass
# Make the methods be named the same thing. This could happen due to e.g.
# using the same decorator that didn't set __name__ properly. Notice that the
# method names do not start with 'test_'; nevertheless, they should be included
# in the test suite because the attribute names start with __test__.
ClassWithDifferentMethodNames.test_method1.im_func.__name__ = 'method_name'
ClassWithDifferentMethodNames.test_method2.im_func.__name__ = 'method_name'
|
{
"content_hash": "33380b5dd799c26c81c0a7983f6bc0c7",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 79,
"avg_line_length": 26.06779661016949,
"alnum_prop": 0.7366710013003901,
"repo_name": "zenlambda/aeta",
"id": "77b887c6e8c87592fc0c0315f5ab0dbe0e7bec3b",
"size": "2132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "testdata/test_modules/sample_package/test_badnames.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2059"
},
{
"name": "HTML",
"bytes": "3351"
},
{
"name": "JavaScript",
"bytes": "60153"
},
{
"name": "Python",
"bytes": "752628"
},
{
"name": "Shell",
"bytes": "1580"
}
],
"symlink_target": ""
}
|
from typing import Awaitable, Callable, Dict, Optional, Sequence, Tuple, Union
import warnings
from google.api_core import gapic_v1, grpc_helpers_async, operations_v1
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.cloud.location import locations_pb2 # type: ignore
from google.longrunning import operations_pb2 # type: ignore
import grpc # type: ignore
from grpc.experimental import aio # type: ignore
from google.cloud.run_v2.types import revision
from .base import DEFAULT_CLIENT_INFO, RevisionsTransport
from .grpc import RevisionsGrpcTransport
class RevisionsGrpcAsyncIOTransport(RevisionsTransport):
"""gRPC AsyncIO backend transport for Revisions.
Cloud Run Revision Control Plane API.
This class defines the same methods as the primary client, so the
primary client can load the underlying transport implementation
and call it.
It sends protocol buffers over the wire using gRPC (which is built on
top of HTTP/2); the ``grpcio`` package must be installed.
"""
_grpc_channel: aio.Channel
_stubs: Dict[str, Callable] = {}
@classmethod
def create_channel(
cls,
host: str = "run.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
quota_project_id: Optional[str] = None,
**kwargs,
) -> aio.Channel:
"""Create and return a gRPC AsyncIO channel object.
Args:
host (Optional[str]): The host for the channel to use.
credentials (Optional[~.Credentials]): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
kwargs (Optional[dict]): Keyword arguments, which are passed to the
channel creation.
Returns:
aio.Channel: A gRPC AsyncIO channel object.
"""
return grpc_helpers_async.create_channel(
host,
credentials=credentials,
credentials_file=credentials_file,
quota_project_id=quota_project_id,
default_scopes=cls.AUTH_SCOPES,
scopes=scopes,
default_host=cls.DEFAULT_HOST,
**kwargs,
)
def __init__(
self,
*,
host: str = "run.googleapis.com",
credentials: Optional[ga_credentials.Credentials] = None,
credentials_file: Optional[str] = None,
scopes: Optional[Sequence[str]] = None,
channel: Optional[aio.Channel] = None,
api_mtls_endpoint: Optional[str] = None,
client_cert_source: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
ssl_channel_credentials: Optional[grpc.ChannelCredentials] = None,
client_cert_source_for_mtls: Optional[Callable[[], Tuple[bytes, bytes]]] = None,
quota_project_id: Optional[str] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
always_use_jwt_access: Optional[bool] = False,
api_audience: Optional[str] = None,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]):
The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is ignored if ``channel`` is provided.
credentials_file (Optional[str]): A file with credentials that can
be loaded with :func:`google.auth.load_credentials_from_file`.
This argument is ignored if ``channel`` is provided.
scopes (Optional[Sequence[str]]): A optional list of scopes needed for this
service. These are only used when credentials are not specified and
are passed to :func:`google.auth.default`.
channel (Optional[aio.Channel]): A ``Channel`` instance through
which to make calls.
api_mtls_endpoint (Optional[str]): Deprecated. The mutual TLS endpoint.
If provided, it overrides the ``host`` argument and tries to create
a mutual TLS channel with client SSL credentials from
``client_cert_source`` or application default SSL credentials.
client_cert_source (Optional[Callable[[], Tuple[bytes, bytes]]]):
Deprecated. A callback to provide client SSL certificate bytes and
private key bytes, both in PEM format. It is ignored if
``api_mtls_endpoint`` is None.
ssl_channel_credentials (grpc.ChannelCredentials): SSL credentials
for the grpc channel. It is ignored if ``channel`` is provided.
client_cert_source_for_mtls (Optional[Callable[[], Tuple[bytes, bytes]]]):
A callback to provide client certificate bytes and private key bytes,
both in PEM format. It is used to configure a mutual TLS channel. It is
ignored if ``channel`` or ``ssl_channel_credentials`` is provided.
quota_project_id (Optional[str]): An optional project to use for billing
and quota.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
always_use_jwt_access (Optional[bool]): Whether self signed JWT should
be used for service account credentials.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
google.api_core.exceptions.DuplicateCredentialArgs: If both ``credentials``
and ``credentials_file`` are passed.
"""
self._grpc_channel = None
self._ssl_channel_credentials = ssl_channel_credentials
self._stubs: Dict[str, Callable] = {}
self._operations_client: Optional[operations_v1.OperationsAsyncClient] = None
if api_mtls_endpoint:
warnings.warn("api_mtls_endpoint is deprecated", DeprecationWarning)
if client_cert_source:
warnings.warn("client_cert_source is deprecated", DeprecationWarning)
if channel:
# Ignore credentials if a channel was passed.
credentials = False
# If a channel was explicitly provided, set it.
self._grpc_channel = channel
self._ssl_channel_credentials = None
else:
if api_mtls_endpoint:
host = api_mtls_endpoint
# Create SSL credentials with client_cert_source or application
# default SSL credentials.
if client_cert_source:
cert, key = client_cert_source()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
else:
self._ssl_channel_credentials = SslCredentials().ssl_credentials
else:
if client_cert_source_for_mtls and not ssl_channel_credentials:
cert, key = client_cert_source_for_mtls()
self._ssl_channel_credentials = grpc.ssl_channel_credentials(
certificate_chain=cert, private_key=key
)
# The base transport sets the host, credentials and scopes
super().__init__(
host=host,
credentials=credentials,
credentials_file=credentials_file,
scopes=scopes,
quota_project_id=quota_project_id,
client_info=client_info,
always_use_jwt_access=always_use_jwt_access,
api_audience=api_audience,
)
if not self._grpc_channel:
self._grpc_channel = type(self).create_channel(
self._host,
# use the credentials which are saved
credentials=self._credentials,
# Set ``credentials_file`` to ``None`` here as
# the credentials that we saved earlier should be used.
credentials_file=None,
scopes=self._scopes,
ssl_credentials=self._ssl_channel_credentials,
quota_project_id=quota_project_id,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
# Wrap messages. This must be done after self._grpc_channel exists
self._prep_wrapped_messages(client_info)
@property
def grpc_channel(self) -> aio.Channel:
"""Create the channel designed to connect to this service.
This property caches on the instance; repeated calls return
the same channel.
"""
# Return the channel from cache.
return self._grpc_channel
@property
def operations_client(self) -> operations_v1.OperationsAsyncClient:
"""Create the client designed to process long-running operations.
This property caches on the instance; repeated calls return the same
client.
"""
# Quick check: Only create a new client if we do not already have one.
if self._operations_client is None:
self._operations_client = operations_v1.OperationsAsyncClient(
self.grpc_channel
)
# Return the client from cache.
return self._operations_client
@property
def get_revision(
self,
) -> Callable[[revision.GetRevisionRequest], Awaitable[revision.Revision]]:
r"""Return a callable for the get revision method over gRPC.
Gets information about a Revision.
Returns:
Callable[[~.GetRevisionRequest],
Awaitable[~.Revision]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_revision" not in self._stubs:
self._stubs["get_revision"] = self.grpc_channel.unary_unary(
"/google.cloud.run.v2.Revisions/GetRevision",
request_serializer=revision.GetRevisionRequest.serialize,
response_deserializer=revision.Revision.deserialize,
)
return self._stubs["get_revision"]
@property
def list_revisions(
self,
) -> Callable[
[revision.ListRevisionsRequest], Awaitable[revision.ListRevisionsResponse]
]:
r"""Return a callable for the list revisions method over gRPC.
Lists Revisions from a given Service, or from a given
location.
Returns:
Callable[[~.ListRevisionsRequest],
Awaitable[~.ListRevisionsResponse]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_revisions" not in self._stubs:
self._stubs["list_revisions"] = self.grpc_channel.unary_unary(
"/google.cloud.run.v2.Revisions/ListRevisions",
request_serializer=revision.ListRevisionsRequest.serialize,
response_deserializer=revision.ListRevisionsResponse.deserialize,
)
return self._stubs["list_revisions"]
@property
def delete_revision(
self,
) -> Callable[
[revision.DeleteRevisionRequest], Awaitable[operations_pb2.Operation]
]:
r"""Return a callable for the delete revision method over gRPC.
Deletes a Revision.
Returns:
Callable[[~.DeleteRevisionRequest],
Awaitable[~.Operation]]:
A function that, when called, will call the underlying RPC
on the server.
"""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_revision" not in self._stubs:
self._stubs["delete_revision"] = self.grpc_channel.unary_unary(
"/google.cloud.run.v2.Revisions/DeleteRevision",
request_serializer=revision.DeleteRevisionRequest.serialize,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["delete_revision"]
def close(self):
return self.grpc_channel.close()
@property
def delete_operation(
self,
) -> Callable[[operations_pb2.DeleteOperationRequest], None]:
r"""Return a callable for the delete_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "delete_operation" not in self._stubs:
self._stubs["delete_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/DeleteOperation",
request_serializer=operations_pb2.DeleteOperationRequest.SerializeToString,
response_deserializer=None,
)
return self._stubs["delete_operation"]
@property
def get_operation(
self,
) -> Callable[[operations_pb2.GetOperationRequest], operations_pb2.Operation]:
r"""Return a callable for the get_operation method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "get_operation" not in self._stubs:
self._stubs["get_operation"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/GetOperation",
request_serializer=operations_pb2.GetOperationRequest.SerializeToString,
response_deserializer=operations_pb2.Operation.FromString,
)
return self._stubs["get_operation"]
@property
def list_operations(
self,
) -> Callable[
[operations_pb2.ListOperationsRequest], operations_pb2.ListOperationsResponse
]:
r"""Return a callable for the list_operations method over gRPC."""
# Generate a "stub function" on-the-fly which will actually make
# the request.
# gRPC handles serialization and deserialization, so we just need
# to pass in the functions for each.
if "list_operations" not in self._stubs:
self._stubs["list_operations"] = self.grpc_channel.unary_unary(
"/google.longrunning.Operations/ListOperations",
request_serializer=operations_pb2.ListOperationsRequest.SerializeToString,
response_deserializer=operations_pb2.ListOperationsResponse.FromString,
)
return self._stubs["list_operations"]
__all__ = ("RevisionsGrpcAsyncIOTransport",)
|
{
"content_hash": "c586e61c999d129090dbdf7066808f19",
"timestamp": "",
"source": "github",
"line_count": 378,
"max_line_length": 91,
"avg_line_length": 44.473544973544975,
"alnum_prop": 0.617452858247576,
"repo_name": "googleapis/python-run",
"id": "55ee1fd98d39ffd388c883c50d486113ba78f46c",
"size": "17411",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "google/cloud/run_v2/services/revisions/transports/grpc_asyncio.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "2048025"
},
{
"name": "Shell",
"bytes": "30651"
}
],
"symlink_target": ""
}
|
import logging
import re
import urlparse
import smtplib
from django.conf import settings
from django import template
from django.template import loader
from django.core import mail
from common import exception
from common import util
def is_allowed_to_send_email_to(email):
if settings.EMAIL_LIMIT_DOMAIN:
limit_email = re.compile(r'.+@%s$' % (settings.EMAIL_LIMIT_DOMAIN))
return limit_email.match(email)
if settings.EMAIL_TEST_ONLY and email not in settings.EMAIL_TEST_ADDRESSES:
return False
return True
def _greeting_name(actor_ref):
try:
return actor_ref.extra['given_name']
except KeyError:
return actor_ref.display_nick()
def _full_name(actor_ref):
try:
return "%s %s" % (actor_ref.extra['given_name'],
actor_ref.extra['family_name'])
except KeyError:
return actor_ref.display_nick()
def log_blocked_send(on_behalf, email, subject, message=None):
logging.info("Not sending an email on behalf of %s to a blocked address to %s: %s",
on_behalf, email, subject)
if message:
logging.info("MSG: \n%s", message)
def send(to_email, subject, message, on_behalf=None, html_message=None):
on_behalf = on_behalf and on_behalf or settings.DEFAULT_FROM_EMAIL
if is_allowed_to_send_email_to(to_email):
email_message = mail.EmailMultiAlternatives(subject=subject,
body=message,
from_email=on_behalf,
to=(to_email,))
if html_message:
email_message.attach_alternative(html_message, mimetype="text/html")
# uses the default email sender, see DEFAULT_FROM_EMAIL in settings.py
# if on_behalf is None
fail_silently = settings.MANAGE_PY
return email_message.send(fail_silently)
else:
log_blocked_send(on_behalf, to_email, subject, message)
raise exception.ValidationError("Cannot send to that email address")
def filter_out_blocked_addresses(message_tuples):
send_count = 0
allowed = []
for subject, message, from_email, recipients in message_tuples:
blocked = [r for r in recipients if not is_allowed_to_send_email_to(r)]
for r in blocked:
log_blocked_send(from_email, r, subject)
send_count += 1
allowed_recipients = [r for r in recipients if not r in blocked]
allowed.append((subject, message, from_email, allowed_recipients))
return (allowed, send_count)
def mass_send(message_tuples):
send_count = 0
allowed, fake_send_count = filter_out_blocked_addresses(message_tuples)
send_count += fake_send_count
send_count += mail.send_mass_mail(tuple(allowed))
return send_count
def email_comment_notification(actor_to_ref, actor_from_ref,
comment_ref, entry_ref):
"""Send an email in response to a comment being posted.
PARAMETERS:
actor_to_ref - actor whom this email is going to
actor_from_ref - actor who posted the comment
comment_ref - the comment that was posted
entry_ref - the entry that was commented on
RETURNS: (subject, message)
"""
entry_url = entry_ref.url()
from_name = actor_from_ref.display_nick()
my_entry = (actor_to_ref.nick == entry_ref.actor)
entry_actor_name = util.display_nick(entry_ref.actor)
entry_title = entry_ref.title()
# TODO(termie) pretty 'r up
comment_pretty = comment_ref.extra.get('content', '')
t = loader.get_template('common/templates/email/email_comment.txt')
c = template.Context(locals(), autoescape=False)
message = t.render(c)
subject = 'New comment on %s' % (entry_ref.title())
return (subject, message)
def email_confirmation_message(actor, activation_code):
name = _greeting_name(actor)
# TODO(teemu): what is a canonical way to do get URLs in Django?
activation_url = 'http://%s/confirm/email/%s' % (settings.DOMAIN, activation_code)
email_first = name
email_link = activation_url
t = loader.get_template('common/templates/email/email_confirm.txt')
c = template.Context(locals(), autoescape=False)
message = t.render(c)
c.autoescape = True
html_template = loader.get_template(
'common/templates/email/email_confirm.html')
html_message = html_template.render(c)
subject = "Welcome! Confirm your email"
return (subject, message, html_message)
def email_invite(from_actor_ref, invite_code):
full_name = _full_name(from_actor_ref)
nick_name = from_actor_ref.display_nick()
accept_url = 'http://%s/invite/email/%s' % (settings.DOMAIN, invite_code)
t = loader.get_template('common/templates/email/email_invite.txt')
c = template.Context(locals(), autoescape=False)
message = t.render(c)
c.autoescape = True
html_template = loader.get_template(
'common/templates/email/email_invite.html')
html_message = html_template.render(c)
subject = '%s invited you to %s' % (full_name, settings.SITE_NAME)
return (subject, message, html_message)
def email_new_follower(owner_ref, target_ref):
email_url = owner_ref.url()
t = loader.get_template('common/templates/email/email_new_follower.txt')
c = template.Context(locals(), autoescape=False)
message = t.render(c)
c.autoescape = True
html_template = loader.get_template(
'common/templates/email/email_new_follower.html')
html_message = html_template.render(c)
subject = '%s now follows you' % owner_ref.display_nick()
return (subject, message, html_message)
def email_new_follower_mutual(owner_ref, target_ref):
profile_url = owner_ref.url()
full_name = _full_name(owner_ref)
t = loader.get_template(
'common/templates/email/email_new_follower_mutual.txt')
c = template.Context(locals(), autoescape=False)
message = t.render(c)
c.autoescape = True
html_template = loader.get_template(
'common/templates/email/email_new_follower_mutual.html')
html_message = html_template.render(c)
subject = '%s is now following you, too' % full_name
return (subject, message, html_message)
def email_lost_password(actor, email, code):
email_link = ("http://%s/login/reset?email=%s&hash=%s" %
(settings.DOMAIN, email, code))
t = loader.get_template('common/templates/email/email_password.txt')
c = template.Context(locals(), autoescape=False)
message = t.render(c)
c.autoescape = True
html_template = loader.get_template(
'common/templates/email/email_password.html')
html_message = html_template.render(c)
subject = ('Password reset')
return (subject, message, html_message)
|
{
"content_hash": "473566806bab6bad89ec4642bd004e41",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 85,
"avg_line_length": 35.64804469273743,
"alnum_prop": 0.7016141670584548,
"repo_name": "jimpick/jaikuengine",
"id": "01f883ff8a3ad1fd2d8bbba1fe839b8f34413d8f",
"size": "6957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common/mail.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "101456"
},
{
"name": "Python",
"bytes": "829875"
},
{
"name": "R",
"bytes": "1277"
},
{
"name": "Shell",
"bytes": "4091"
}
],
"symlink_target": ""
}
|
from os_win import constants
from os_win import exceptions as os_win_exc
from os_win import utilsfactory
from oslo_config import cfg
from oslo_log import log as logging
from designate.backend.agent_backend import base
from designate import exceptions
LOG = logging.getLogger(__name__)
class MSDNSBackend(base.AgentBackend):
__plugin_name__ = 'msdns'
__backend_status__ = 'experimental'
def __init__(self, agent_service):
"""Configure the backend"""
super(MSDNSBackend, self).__init__(agent_service)
self._dnsutils = utilsfactory.get_dnsutils()
masters = cfg.CONF['service:agent'].masters
if not masters:
raise exceptions.Backend("Missing agent AXFR masters")
# Only ip addresses are needed
self._masters = [ns.split(":")[0] for ns in masters]
LOG.info("AXFR masters: %r", self._masters)
def start(self):
"""Start the backend"""
LOG.info("Started msdns backend")
def find_zone_serial(self, zone_name):
"""Return the zone's serial"""
zone_name = zone_name.rstrip(".")
LOG.debug("Finding zone: %s", zone_name)
try:
return self._dnsutils.get_zone_serial(zone_name)
except os_win_exc.DNSZoneNotFound:
# Return None if the zone was not found
return None
def create_zone(self, zone):
"""Create a new DNS Zone"""
zone_name = zone.origin.to_text(omit_final_dot=True)
if isinstance(zone_name, bytes):
zone_name = zone_name.decode('utf-8')
LOG.debug("Creating zone: %s", zone_name)
try:
self._dnsutils.zone_create(
zone_name=zone_name,
zone_type=constants.DNS_ZONE_TYPE_SECONDARY,
ds_integrated=False,
ip_addrs=self._masters)
except os_win_exc.DNSZoneAlreadyExists:
# Zone already exists, check its properties to see if the
# existing zone is identical to the requested one
zone_properties = self._dnsutils.get_zone_properties(zone_name)
identical_zone_exists = (
zone_properties['zone_type'] == (
constants.DNS_ZONE_TYPE_SECONDARY) and
zone_properties['ds_integrated'] is False and
set(zone_properties['master_servers']) == set(self._masters))
if not identical_zone_exists:
raise
def update_zone(self, zone):
"""Instruct MSDNS to request an AXFR from MiniDNS.
"""
zone_name = zone.origin.to_text(omit_final_dot=True)
if isinstance(zone_name, bytes):
zone_name = zone_name.decode('utf-8')
LOG.debug("Updating zone: %s", zone_name)
self._dnsutils.zone_update(zone_name)
def delete_zone(self, zone_name):
"""Delete a DNS Zone
Do not raise exception if the zone does not exist.
"""
LOG.debug('Deleting zone: %s' % zone_name)
zone_name = zone_name.rstrip(".")
self._dnsutils.zone_delete(zone_name)
|
{
"content_hash": "d827d480368add2af7bba714f80c89e1",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 77,
"avg_line_length": 35.48275862068966,
"alnum_prop": 0.5999352121801101,
"repo_name": "openstack/designate",
"id": "182f2d3c177baec7f09288e07eaae7b96eb0b589",
"size": "3790",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "designate/backend/agent_backend/impl_msdns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "71074"
},
{
"name": "Jinja",
"bytes": "2004"
},
{
"name": "Mako",
"bytes": "1012"
},
{
"name": "Python",
"bytes": "2442862"
},
{
"name": "Shell",
"bytes": "46200"
}
],
"symlink_target": ""
}
|
"""Statewide Crime Data"""
from statsmodels.datasets import utils as du
__docformat__ = 'restructuredtext'
COPYRIGHT = """Public domain."""
TITLE = """Statewide Crime Data 2009"""
SOURCE = """
All data is for 2009 and was obtained from the American Statistical Abstracts except as indicated below.
"""
DESCRSHORT = """State crime data 2009"""
DESCRLONG = DESCRSHORT
#suggested notes
NOTE = """::
Number of observations: 51
Number of variables: 8
Variable name definitions:
state
All 50 states plus DC.
violent
Rate of violent crimes / 100,000 population. Includes murder, forcible
rape, robbery, and aggravated assault. Numbers for Illinois and
Minnesota do not include forcible rapes. Footnote included with the
American Statistical Abstract table reads:
"The data collection methodology for the offense of forcible
rape used by the Illinois and the Minnesota state Uniform Crime
Reporting (UCR) Programs (with the exception of Rockford, Illinois,
and Minneapolis and St. Paul, Minnesota) does not comply with
national UCR guidelines. Consequently, their state figures for
forcible rape and violent crime (of which forcible rape is a part)
are not published in this table."
murder
Rate of murders / 100,000 population.
hs_grad
Percent of population having graduated from high school or higher.
poverty
% of individuals below the poverty line
white
Percent of population that is one race - white only. From 2009 American
Community Survey
single
Calculated from 2009 1-year American Community Survey obtained obtained
from Census. Variable is Male householder, no wife present, family
household combined with Female householder, no husband present, family
household, divided by the total number of Family households.
urban
% of population in Urbanized Areas as of 2010 Census. Urbanized
Areas are area of 50,000 or more people."""
def load_pandas():
data = _get_data()
return du.process_pandas(data, endog_idx=2, exog_idx=[7, 4, 3, 5], index_idx=0)
def load():
"""
Load the statecrime data and return a Dataset class instance.
Returns
-------
Dataset
See DATASET_PROPOSAL.txt for more information.
"""
return load_pandas()
def _get_data():
return du.load_csv(__file__, 'statecrime.csv')
|
{
"content_hash": "ef69562acba91782679dd54e8787f9b3",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 104,
"avg_line_length": 33.770270270270274,
"alnum_prop": 0.6802721088435374,
"repo_name": "bashtage/statsmodels",
"id": "7d5530b8fe21279ee9c25370e7881a62b27ef31d",
"size": "2499",
"binary": false,
"copies": "3",
"ref": "refs/heads/main",
"path": "statsmodels/datasets/statecrime/data.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "625"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "Cython",
"bytes": "225838"
},
{
"name": "Fortran",
"bytes": "16671"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "100525"
},
{
"name": "Python",
"bytes": "14433387"
},
{
"name": "R",
"bytes": "106569"
},
{
"name": "Shell",
"bytes": "25329"
},
{
"name": "Stata",
"bytes": "50129"
}
],
"symlink_target": ""
}
|
"""Core eval alignment algorithms
"""
import warnings
from functools import partial, wraps
from pandas.compat import zip, range
import numpy as np
import pandas as pd
from pandas import compat
from pandas.errors import PerformanceWarning
from pandas.core.common import flatten
from pandas.core.computation.common import _result_type_many
def _align_core_single_unary_op(term):
if isinstance(term.value, np.ndarray):
typ = partial(np.asanyarray, dtype=term.value.dtype)
else:
typ = type(term.value)
ret = typ,
if not hasattr(term.value, 'axes'):
ret += None,
else:
ret += _zip_axes_from_type(typ, term.value.axes),
return ret
def _zip_axes_from_type(typ, new_axes):
axes = {}
for ax_ind, ax_name in compat.iteritems(typ._AXIS_NAMES):
axes[ax_name] = new_axes[ax_ind]
return axes
def _any_pandas_objects(terms):
"""Check a sequence of terms for instances of PandasObject."""
return any(isinstance(term.value, pd.core.generic.PandasObject)
for term in terms)
def _filter_special_cases(f):
@wraps(f)
def wrapper(terms):
# single unary operand
if len(terms) == 1:
return _align_core_single_unary_op(terms[0])
term_values = (term.value for term in terms)
# we don't have any pandas objects
if not _any_pandas_objects(terms):
return _result_type_many(*term_values), None
return f(terms)
return wrapper
@_filter_special_cases
def _align_core(terms):
term_index = [i for i, term in enumerate(terms)
if hasattr(term.value, 'axes')]
term_dims = [terms[i].value.ndim for i in term_index]
ndims = pd.Series(dict(zip(term_index, term_dims)))
# initial axes are the axes of the largest-axis'd term
biggest = terms[ndims.idxmax()].value
typ = biggest._constructor
axes = biggest.axes
naxes = len(axes)
gt_than_one_axis = naxes > 1
for value in (terms[i].value for i in term_index):
is_series = isinstance(value, pd.Series)
is_series_and_gt_one_axis = is_series and gt_than_one_axis
for axis, items in enumerate(value.axes):
if is_series_and_gt_one_axis:
ax, itm = naxes - 1, value.index
else:
ax, itm = axis, items
if not axes[ax].is_(itm):
axes[ax] = axes[ax].join(itm, how='outer')
for i, ndim in compat.iteritems(ndims):
for axis, items in zip(range(ndim), axes):
ti = terms[i].value
if hasattr(ti, 'reindex'):
transpose = isinstance(ti, pd.Series) and naxes > 1
reindexer = axes[naxes - 1] if transpose else items
term_axis_size = len(ti.axes[axis])
reindexer_size = len(reindexer)
ordm = np.log10(max(1, abs(reindexer_size - term_axis_size)))
if ordm >= 1 and reindexer_size >= 10000:
w = ('Alignment difference on axis {axis} is larger '
'than an order of magnitude on term {term!r}, by '
'more than {ordm:.4g}; performance may suffer'
).format(axis=axis, term=terms[i].name, ordm=ordm)
warnings.warn(w, category=PerformanceWarning, stacklevel=6)
f = partial(ti.reindex, reindexer, axis=axis, copy=False)
terms[i].update(f())
terms[i].update(terms[i].value.values)
return typ, _zip_axes_from_type(typ, axes)
def _align(terms):
"""Align a set of terms"""
try:
# flatten the parse tree (a nested list, really)
terms = list(flatten(terms))
except TypeError:
# can't iterate so it must just be a constant or single variable
if isinstance(terms.value, pd.core.generic.NDFrame):
typ = type(terms.value)
return typ, _zip_axes_from_type(typ, terms.value.axes)
return np.result_type(terms.type), None
# if all resolved variables are numeric scalars
if all(term.is_scalar for term in terms):
return _result_type_many(*(term.value for term in terms)).type, None
# perform the main alignment
typ, axes = _align_core(terms)
return typ, axes
def _reconstruct_object(typ, obj, axes, dtype):
"""Reconstruct an object given its type, raw value, and possibly empty
(None) axes.
Parameters
----------
typ : object
A type
obj : object
The value to use in the type constructor
axes : dict
The axes to use to construct the resulting pandas object
Returns
-------
ret : typ
An object of type ``typ`` with the value `obj` and possible axes
`axes`.
"""
try:
typ = typ.type
except AttributeError:
pass
res_t = np.result_type(obj.dtype, dtype)
if (not isinstance(typ, partial) and
issubclass(typ, pd.core.generic.PandasObject)):
return typ(obj, dtype=res_t, **axes)
# special case for pathological things like ~True/~False
if hasattr(res_t, 'type') and typ == np.bool_ and res_t != np.bool_:
ret_value = res_t.type(obj)
else:
ret_value = typ(obj).astype(res_t)
# The condition is to distinguish 0-dim array (returned in case of
# scalar) and 1 element array
# e.g. np.array(0) and np.array([0])
if len(obj.shape) == 1 and len(obj) == 1:
if not isinstance(ret_value, np.ndarray):
ret_value = np.array([ret_value]).astype(res_t)
return ret_value
|
{
"content_hash": "dbdd6942ba4790af8728381307532eac",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 79,
"avg_line_length": 31.39664804469274,
"alnum_prop": 0.5982206405693951,
"repo_name": "zfrenchee/pandas",
"id": "2e912b0075bfd3d623cb0828c11832c92b52cd3f",
"size": "5620",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/core/computation/align.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3847"
},
{
"name": "C",
"bytes": "470171"
},
{
"name": "C++",
"bytes": "17193"
},
{
"name": "HTML",
"bytes": "551706"
},
{
"name": "Makefile",
"bytes": "989"
},
{
"name": "PowerShell",
"bytes": "2970"
},
{
"name": "Python",
"bytes": "12658422"
},
{
"name": "Shell",
"bytes": "25785"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
}
|
class Liegewiese(object):
loc_index = 0
def enter(self):
if first_visit:
print "Du befindest die auf der Liegewiese des Schwimmbads."
else:
print "Du warst schon eimal hier."
class GertrudesBaum(object):
loc_index = 1
loc_name = 'Gertrude\'s Baum'
def enter(self):
if first_visit:
print "Du bist an Gertrudes Baum."
else:
print "Du warst schon eimal hier."
gertudesbaum = GertrudesBaum()
class Beckenrand(object):
loc_index = 2
loc_name = 'Beckenrand'
def enter(self):
if first_visit:
print "Du bist Beckenrand."
else:
print "Du warst schon eimal hier."
beckenrand = Beckenrand()
class Sprungturm(object):
loc_index = 3
loc_name = 'Sprungturm'
def enter(self):
if first_visit:
print "Du bist am Sprungturm."
else:
print "Du warst schon eimal hier."
sprungturm = Sprungturm()
class Schwimmbecken(object):
loc_index = 4
loc_name = 'Schwimmbecken'
def enter(self):
if first_visit:
print "Du bist im Schwimmbecken."
else:
print "Du warst schon eimal hier."
schwimmbecken = Schwimmbecken()
class Imbiss(object):
loc_index = 5
def enter(self):
if first_visit:
print "Du bist am Imbiss."
else:
print "Du warst schon eimal hier."
imbiss = Imbiss()
class Kassenhaus(object):
loc_index = 6
def enter(self):
if first_visit:
print "Du bist am Kassenhaus."
else:
print "Du warst schon eimal hier."
kassenhaus = Kassenhaus()
|
{
"content_hash": "6f99288f80d61a63b3ab4ee5e7913ef4",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 72,
"avg_line_length": 25.676923076923078,
"alnum_prop": 0.5835829838226483,
"repo_name": "empea-careercriminal/the_pool",
"id": "f4f2d52a2204a94ee5e20bf0a071bf3a7d8f8fd3",
"size": "1686",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "das_schwimmbad.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8046"
}
],
"symlink_target": ""
}
|
import os
from flask import Flask, request, Response
app = Flask(__name__)
SLACK_WEBHOOK_SECRET = ""
SLACK_WEBHOOK_SECRET = os.environ.get('SLACK_TOKEN')
if (SLACK_WEBHOOK_SECRET==""):
print "ERROR: Missing environment variable: SLACK_WEBHOOK_SECRET"
exit()
SLACK_WEBHOOK_SECRET = os.environ.get('SLACK_WEBHOOK_SECRET')
@app.route('/slack', methods=['POST'])
def inbound():
if request.form.get('token') == SLACK_WEBHOOK_SECRET:
#print "Token correct, proceeding..."
channel = request.form.get('channel_name')
username = request.form.get('user_name')
text = request.form.get('text')
inbound_message = username + " in " + channel + " says: " + text
print(inbound_message)
else:
pass
#print "Token incorrect!"
return Response(), 200
@app.route('/', methods=['GET'])
def test():
return Response('It works!')
if __name__ == "__main__":
app.run(debug=True)
|
{
"content_hash": "240dfb063ffa8d9e9f4a93167a3442d1",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 72,
"avg_line_length": 23.9,
"alnum_prop": 0.6234309623430963,
"repo_name": "BartGo/python-slack-drafts",
"id": "4fdde9eee00c40c4963c4bc0a9f78eb86eeb7977",
"size": "956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "receive.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2933"
},
{
"name": "Shell",
"bytes": "1016"
}
],
"symlink_target": ""
}
|
from utils import run_cmd
from utils import enter_depend_test
enter_depend_test()
from depend_test_framework.core import Action, ParamsRequire, Provider, Consumer
@Action.decorator(1)
@ParamsRequire.decorator(['guest_name', 'target_host'])
@Consumer.decorator('$guest_name.active', Consumer.REQUIRE)
@Consumer.decorator('$target_host.$guest_name.active', Consumer.REQUIRE_N)
@Provider.decorator('$target_host.$guest_name.active', Provider.SET)
@Provider.decorator('$guest_name.active', Provider.CLEAR)
def migrate(params, env):
target = params.target_host
guest_name = params.guest_name
cmd = 'virsh migrate %s qemu+ssh://%s/system --live' % (guest_name, target)
if params.mock:
params.logger.info("Mock: " + cmd)
return
run_cmd(cmd)
|
{
"content_hash": "546fbf5208636ca814140d7f40feafbb",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 80,
"avg_line_length": 36.80952380952381,
"alnum_prop": 0.7218628719275549,
"repo_name": "LuyaoHuang/depend-test-framework",
"id": "512452bf627ef2ae9d4d7e908d2d810d08b717ba",
"size": "773",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/migration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "110103"
}
],
"symlink_target": ""
}
|
""" Sahana Eden Module Automated Tests - HRM005 Add Staff To Organization
@copyright: 2011-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
from gluon import current
import unittest
from tests.web2unittest import SeleniumUnitTest
from selenium.common.exceptions import NoSuchElementException
from s3 import s3_debug
from tests import *
#import unittest, re, time
import time
class AddStaffToOrganisation(SeleniumUnitTest):
def test_hrm005_add_staff_to_organization(self):
"""
@case: HRM005
@description: Add a premade made staff to a Organisation
@TestDoc: https://docs.google.com/spreadsheet/ccc?key=0AmB3hMcgB-3idG1XNGhhRG9QWF81dUlKLXpJaFlCMFE
@Test Wiki: http://eden.sahanafoundation.org/wiki/DeveloperGuidelines/Testing
"""
browser = self.browser
config = self.config
self.login(account="admin", nexturl="org/organisation")
self.dt_filter("Sri Lanka Red Cross Society")
self.dt_action()
url = browser.current_url
url_parts = url.split("/")
try:
org_id = int(url_parts[-2])
except:
org_id = int(url_parts[-1])
browser.get("%s/org/organisation/%s/human_resource" % (config.url, org_id))
self.create("hrm_human_resource",
[( "first_name",
"Herculano",
"pr_person"),
( "last_name",
"Hugh",
"pr_person"),
( "email",
"herculandfo@icandodfmybest.com",
"pr_person"),
( "job_title_id",
"Secretary General",
"option"),
]
)
|
{
"content_hash": "ee6791a9a2aebe1da614ae6b374b0925",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 110,
"avg_line_length": 39.75675675675676,
"alnum_prop": 0.6203263086335826,
"repo_name": "ashwyn/eden-message_parser",
"id": "ce0715511024edfe0b4e2d93da7bcb6ce5c986b8",
"size": "2942",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/tests/hrm/add_staff_to_organisation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "15238074"
},
{
"name": "PHP",
"bytes": "15220"
},
{
"name": "Perl",
"bytes": "2202"
},
{
"name": "Python",
"bytes": "22506004"
},
{
"name": "Racket",
"bytes": "166"
}
],
"symlink_target": ""
}
|
"""
Crossfilter
------
Crossfilter.
"""
from jinja2 import Template
import json
#from .utilities import color_brewer, _parse_size, legend_scaler, _locations_mirror, _locations_tolist, write_png,\
# image_to_url
#from .six import text_type, binary_type
from folium.element import Figure, JavascriptLink, CssLink, Div, MacroElement
from folium.map import FeatureGroup
from .heat_map import HeatMap
#from .map import Map, TileLayer, Icon, Marker, Popup
class Crossfilter(Div):
def __init__(self, data, **kwargs):
"""Create a Crossfilter
Returns
-------
Folium Crossfilter Object
"""
super(Crossfilter, self).__init__(**kwargs)
self._name = 'Crossfilter'
self.data = data
self.add_children(MacroElement("""
{% macro script(this, kwargs) %}
var {{this._parent.get_name()}} = {};
{{this._parent.get_name()}}.data = {{this._parent.data}};
{{this._parent.get_name()}}.crossfilter = crossfilter({{this._parent.get_name()}}.data);
{{this._parent.get_name()}}.allDim = {{this._parent.get_name()}}.crossfilter.dimension(
function(d) {return d;});
{% endmacro %}
"""))
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}">
{{this.html.render(**kwargs)}}
</div>
{% endmacro %}
{% macro script(this, kwargs) %}
dc.renderAll();
{% endmacro %}
""")
def render(self,**kwargs):
super(Crossfilter,self).render(**kwargs)
figure = self._parent.get_root()
assert isinstance(figure,Figure), ("You cannot render this Element "
"if it's not in a Figure.")
figure.header.add_children(
CssLink("https://cdnjs.cloudflare.com/ajax/libs/dc/1.7.5/dc.css"),
name='dcjs_css')
figure.header.add_children(
CssLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.css"),
name='leaflet_css')
figure.header.add_children(
CssLink("https://maxcdn.bootstrapcdn.com/bootstrap/3.2.0/css/bootstrap.min.css"),
name='bootstrap_css')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/d3/3.5.6/d3.min.js"),
name='d3js')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/crossfilter/1.3.12/crossfilter.min.js"),
name='crossfilterjs')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/dc/2.0.0-beta.20/dc.js"),
name='dcjs')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/leaflet/0.7.3/leaflet.js"),
name='leaflet')
figure.header.add_children(
JavascriptLink("https://cdnjs.cloudflare.com/ajax/libs/underscore.js/1.8.3/underscore-min.js"),
name='underscorejs')
class PieFilter(Div):
def __init__(self, crossfilter, column, name="", width=150, height=150, inner_radius=20,
weight=None, order=None, colors=None, label=None, **kwargs):
"""TODO docstring here
Parameters
----------
"""
super(PieFilter, self).__init__(width=width, height=height, **kwargs)
self._name = 'PieFilter'
self.crossfilter = crossfilter
self.column = column
self.name = name
self.width = width
self.height = height
self.inner_radius = inner_radius
self.order = order
self.weight = weight
self.colors = [x for x in colors] if colors else None
self.label = label
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}" class="{{this.class_}}">{{this.html.render(**kwargs)}}</div>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.dimension = {{this.crossfilter.get_name()}}.crossfilter.dimension(
function(d) {return d["{{this.column}}"];});
document.getElementById("{{this.get_name()}}").innerHTML =
'<h4>{{this.name}} <small><a id="{{this.get_name()}}-reset">reset</a></small></h4>'
+ '<div id="{{this.get_name()}}-chart" class="dc-chart"></div>';
{{this.get_name()}}.chart = dc.pieChart('#{{this.get_name()}}-chart')
.width({{this.width}})
.height({{this.height}})
.dimension({{this.get_name()}}.dimension)
.group({{this.get_name()}}.dimension.group()
{% if this.weight %}.reduceSum(function(d) {return d["{{this.weight}}"];})
{% else %}.reduceCount(){% endif %}
)
.innerRadius({{this.inner_radius}})
{% if this.label %}.label({{this.label}}){% endif %}
{% if this.colors %}.ordinalColors({{this.colors}}){% endif %}
{% if this.order %}.ordering(function (d) {
var out = null;
var order={{this.order}};
for (var j=0;j<order.length;j++) {
if (order[j]==d.key) {out = 1+j;}
}
return out;}){% endif %};
d3.selectAll('#{{this.get_name()}}-reset').on('click',function () {
{{this.get_name()}}.chart.filterAll();
dc.redrawAll();
});
{% endmacro %}
""")
class RowBarFilter(Div):
"""TODO docstring here
Parameters
----------
"""
def __init__(self, crossfilter, column, name="", width=150, height=150, inner_radius=20,
weight=None, order=None, elastic_x=True, colors=None, **kwargs):
super(RowBarFilter, self).__init__(width=width, height=height, **kwargs)
self._name = 'RowBarFilter'
self.crossfilter = crossfilter
self.column = column
self.name = name
self.width = width
self.height = height
self.inner_radius = inner_radius
self.order = order
self.weight = weight
self.elastic_x = elastic_x
self.colors = [x for x in colors] if colors else None
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}" class="{{this.class_}}">{{this.html.render(**kwargs)}}</div>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.dimension = {{this.crossfilter.get_name()}}.crossfilter.dimension(
function(d) {return d["{{this.column}}"];});
document.getElementById("{{this.get_name()}}").innerHTML =
'<h4>{{this.name}} <small><a id="{{this.get_name()}}-reset">reset</a></small></h4>'
+ '<div id="{{this.get_name()}}-chart" class="dc-chart"></div>';
{{this.get_name()}}.chart = dc.rowChart('#{{this.get_name()}}-chart')
.width({{this.width}})
.height({{this.height}})
.dimension({{this.get_name()}}.dimension)
.group({{this.get_name()}}.dimension.group()
{% if this.weight %}.reduceSum(function(d) {return d["{{this.weight}}"];})
{% else %}.reduceCount(){% endif %}
)
.elasticX({{this.elastic_x.__str__().lower()}})
{% if this.colors %}.ordinalColors({{this.colors}}){% endif %}
{% if this.order %}.ordering(function (d) {
var out = null;
var order={{this.order}};
for (var j=0;j<order.length;j++) {
if (order[j]==d.key) {out = 1+j;}
}
return out;}){% endif %};
d3.selectAll('#{{this.get_name()}}-reset').on('click',function () {
{{this.get_name()}}.chart.filterAll();
dc.redrawAll();
});
{% endmacro %}
""")
class BarFilter(Div):
def __init__(self, crossfilter, column, width=150, height=150, bar_padding=0.1,
domain=None, groupby=None, xlabel="", ylabel="", margins=None,
weight=None, elastic_y=True, xticks=None, time_format=None, **kwargs):
"""TODO docstring here
Parameters
----------
"""
super(BarFilter, self).__init__(**kwargs)
self._name = 'BarFilter'
self.crossfilter = crossfilter
self.column = column
self.width=width
self.height=height
self.bar_padding=bar_padding
self.domain=json.dumps(domain)
self.groupby=groupby
self.xlabel=xlabel
self.ylabel=ylabel
self.margins=json.dumps(margins)
self.xticks=json.dumps(xticks)
self.time_format=time_format
self.weight = weight
self.elastic_y = elastic_y
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}" class="{{this.class_}}">{{this.html.render(**kwargs)}}</div>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {
domain : {{this.domain}},
groupby : {{this.groupby}},
xAxisTickValues : {{this.xticks}},
};
{{this.get_name()}}.dimension = {{this.crossfilter.get_name()}}.crossfilter.dimension(
function(d) {
return Math.floor(
(d["{{this.column}}"]-{{this.get_name()}}.domain[0])/{{this.get_name()}}.groupby)
+{{this.get_name()}}.domain[0]/{{this.get_name()}}.groupby;
});
{{this.get_name()}}.ticks = [];
for (var j=0; j<{{this.get_name()}}.xAxisTickValues.length; j++) {
{{this.get_name()}}.ticks[j] = {{this.get_name()}}.xAxisTickValues[j]/{{this.get_name()}}.groupby;
}
dc.barChart("#{{this.get_name()}}")
.width({{this.width}})
.height({{this.height}})
.dimension({{this.get_name()}}.dimension)
.group({{this.get_name()}}.dimension.group()
{% if this.weight %}.reduceSum(function(d) {return d["{{this.weight}}"];})
{% else %}.reduceCount(){% endif %}
)
.x(d3.scale.linear().domain([
{{this.get_name()}}.domain[0]/{{this.get_name()}}.groupby,
{{this.get_name()}}.domain[1]/{{this.get_name()}}.groupby,
]))
.elasticY({{this.elastic_y.__str__().lower()}})
.centerBar(false)
.barPadding({{this.bar_padding}})
.xAxisLabel("{{this.xlabel}}")
.yAxisLabel("{{this.ylabel}}")
.margins({{this.margins}})
.xAxis()
.tickValues({{this.get_name()}}.ticks)
.tickFormat(function(x){
{%if this.time_format %}
var dateformat = d3.time.format("{{this.time_format}}");
return dateformat(new Date(x*{{this.get_name()}}.groupby));
{% else %}
return x*{{this.get_name()}}.groupby;
{% endif %}
});
{% endmacro %}
""")
class FeatureGroupFilter(FeatureGroup):
def __init__(self, crossfilter, name=None, fit_bounds=False,
circle_radius=None, color="#0000ff", opacity=1., **kwargs):
"""
"""
super(FeatureGroupFilter, self).__init__(**kwargs)
self._name = 'FeatureGroupFilter'
self.tile_name = name if name is not None else self.get_name()
self.crossfilter = crossfilter
self.fit_bounds = fit_bounds
self.circle_radius = circle_radius
self.color = color
self.opacity = opacity
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.feature_group = new L.FeatureGroup();
{{this.get_name()}}.updateFun = function() {
this.feature_group.clearLayers();
var dimVals = {{this.crossfilter.get_name()}}.allDim.top(Infinity)
for (var i in dimVals) {
var d = dimVals[i];
var marker =
{% if this.circle_radius %}L.circleMarker([d.lat, d.lng],
{
fillColor: '{{ this.color }}',
fillOpacity: {{ this.opacity }}
}).setRadius({{this.circle_radius}})
{% else %}L.marker([d.lat, d.lng],{opacity:{{this.opacity}} }){% endif %};
marker.bindPopup(d.popup);
this.feature_group.addLayer(marker);
}
{{this._parent.get_name()}}.addLayer(this.feature_group);
{% if this.fit_bounds %}{{this._parent.get_name()}}
.fitBounds(this.feature_group.getBounds());{% endif %}
}
dc.dataTable('#foo')
.dimension({{this.crossfilter.get_name()}}.allDim)
.group(function (d) { return 'dc.js';})
.on('renderlet', function (table) { {{this.get_name()}}.updateFun();});
{{this.get_name()}}.updateFun();
{% endmacro %}
""")
class TableFilter(Div):
def __init__(self, crossfilter, columns, size=10, sort_by=None, ascending=True, **kwargs):
"""TODO docstring here
Parameters
----------
"""
super(TableFilter, self).__init__(**kwargs)
self._name = 'TableFilter'
self.crossfilter = crossfilter
self.columns = columns
self.sort_by = sort_by
self.ascending = ascending
self.size = size
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<table id="{{this.get_name()}}" class="{{this.class_}}">
<thead>
<tr class="header">
{%for col in this.columns%}<th>{{col}}</th>{% endfor %}
</tr>
</thead>
</table>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.dataTable = dc.dataTable('#{{this.get_name()}}');
{{this.get_name()}}.dataTable
.dimension({{this.crossfilter.get_name()}}.allDim)
.group(function (d) { return 'dc.js extra line'; })
.size({{this.size}})
.columns([
{% for col in this.columns %}
function (d) { return d["{{col}}"]; },
{% endfor %}
])
{%if this.sort_by %}.sortBy(dc.pluck('{this.sort_by}'))
{%if this.ascending %}.order(d3.ascending){% else %}.order(d3.descending){% endif %}
{% endif %}
.on('renderlet', function (table) {
table.select('tr.dc-table-group').remove();
});
{% endmacro %}
""")
class CountFilter(Div):
def __init__(self, crossfilter, html_template="{filter}/{total}", **kwargs):
"""TODO docstring here
Parameters
----------
"""
super(CountFilter, self).__init__(**kwargs)
self._name = 'CountFilter'
self.crossfilter = crossfilter
self.html_template = html_template
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}" class="{{this.class_}}">
{{this.html_template.format(
filter='<span class="filter-count"></span>',
total='<span class="total-count"></span>'
)}}
</div>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.dataCount = dc.dataCount("#{{this.get_name()}}")
.dimension({{this.crossfilter.get_name()}}.crossfilter)
.group({{this.crossfilter.get_name()}}.crossfilter.groupAll()
);
{% endmacro %}
""")
class ResetFilter(Div):
def __init__(self, html="Reset all", **kwargs):
"""TODO docstring here
Parameters
----------
"""
super(ResetFilter, self).__init__(**kwargs)
self._name = 'ResetFilter'
self.html = html
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<a id="{{this.get_name()}}" class="{{this.class_}} reset-filters">{{this.html}}</a>
{% endmacro %}
{% macro script(this, kwargs) %}
d3.selectAll('.reset-filters').on('click', function () {
dc.filterAll();
dc.renderAll();
});
{% endmacro %}
""")
class HeatmapFilter(HeatMap):
def __init__(self, crossfilter, name=None, fit_bounds=False, **kwargs):
"""
"""
super(HeatmapFilter, self).__init__([],**kwargs)
self._name = 'HeatmapFilter'
self.crossfilter = crossfilter
self.fit_bounds = fit_bounds
self._template = Template(u"""
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.heatmap = new L.heatLayer(
{},
{
minOpacity: {{this.min_opacity}},
maxZoom: {{this.max_zoom}},
max: {{this.max_val}},
radius: {{this.radius}},
blur: {{this.blur}},
gradient: {{this.gradient}}
})
.addTo({{this._parent.get_name()}});
{{this.get_name()}}.updateFun = function() {
// this.heatmap.clearLayers();
var dimVals = {{this.crossfilter.get_name()}}.allDim.top(Infinity);
var latlngs = [];
for (var i in dimVals) {
var d = dimVals[i];
latlngs.push([d.lat, d.lng]);
}
{{this.get_name()}}.heatmap.setLatLngs(latlngs);
{% if this.fit_bounds %}{{this._parent.get_name()}}
.fitBounds(this.heatmap.getBounds());{% endif %}
}
dc.dataTable('#foo')
.dimension({{this.crossfilter.get_name()}}.allDim)
.group(function (d) { return 'dc.js';})
.on('renderlet', function (table) { {{this.get_name()}}.updateFun();});
{{this.get_name()}}.updateFun();
{% endmacro %}
""")
class GeoChoroplethFilter(Div):
"""TODO docstring here
Parameters
----------
"""
def __init__(self, crossfilter, column, geojson, key_on='feature.properties.name',
name="", width=150, height=150, inner_radius=20,
weight=None, order=None, elastic_x=True, projection=None,
colors=None, **kwargs):
super(GeoChoroplethFilter, self).__init__(width=width, height=height, **kwargs)
self._name = 'GeoChoroplethFilter'
self.crossfilter = crossfilter
self.column = column
self.geojson = geojson
self.key_on = key_on
self.name = name
self.width = width
self.height = height
self.projection = projection
self.inner_radius = inner_radius
self.order = order
self.weight = weight
self.elastic_x = elastic_x
self.colors = colors if colors else None
self._template = Template(u"""
{% macro header(this, kwargs) %}
<style> #{{this.get_name()}} {
{% if this.position %}position : {{this.position}};{% endif %}
{% if this.width %}width : {{this.width[0]}}{{this.width[1]}};{% endif %}
{% if this.height %}height: {{this.height[0]}}{{this.height[1]}};{% endif %}
{% if this.left %}left: {{this.left[0]}}{{this.left[1]}};{% endif %}
{% if this.top %}top: {{this.top[0]}}{{this.top[1]}};{% endif %}
}
</style>
{% endmacro %}
{% macro html(this, kwargs) %}
<div id="{{this.get_name()}}" class="{{this.class_}}">{{this.html.render(**kwargs)}}</div>
{% endmacro %}
{% macro script(this, kwargs) %}
var {{this.get_name()}} = {};
{{this.get_name()}}.geojson = {{this.geojson}};
{{this.get_name()}}.dimension = {{this.crossfilter.get_name()}}.crossfilter.dimension(
function(d) {return d["{{this.column}}"];});
document.getElementById("{{this.get_name()}}").innerHTML =
'<h4>{{this.name}} <small><a id="{{this.get_name()}}-reset">reset</a></small></h4>'
+ '<div id="{{this.get_name()}}-chart" class="dc-chart"></div>';
{{this.get_name()}}.chart = dc.geoChoroplethChart('#{{this.get_name()}}-chart')
.width({{this.width}})
.height({{this.height}})
.dimension({{this.get_name()}}.dimension)
.group({{this.get_name()}}.dimension.group()
{% if this.weight %}.reduceSum(function(d) {return d["{{this.weight}}"];})
{% else %}.reduceCount(){% endif %}
)
.overlayGeoJson({{this.get_name()}}.geojson.features, "state",
function (feature) {return {{this.key_on}};}
)
{% if this.projection %}.projection({{this.projection}}){% endif %}
{% if this.colors %}.colors({{this.colors}}){% endif %}
{% if this.order %}.ordering(function (d) {
var out = null;
var order={{this.order}};
for (var j=0;j<order.length;j++) {
if (order[j]==d.key) {out = 1+j;}
}
return out;}){% endif %};
d3.selectAll('#{{this.get_name()}}-reset').on('click',function () {
{{this.get_name()}}.chart.filterAll();
dc.redrawAll();
});
{% endmacro %}
""")
|
{
"content_hash": "2feec77b8b886c7422710f6c470bbd42",
"timestamp": "",
"source": "github",
"line_count": 616,
"max_line_length": 115,
"avg_line_length": 43.574675324675326,
"alnum_prop": 0.4758214738097012,
"repo_name": "BibMartin/folium",
"id": "d919ae4985e98d1fa1f4764fb38254f788e7d230",
"size": "26866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "folium/plugins/crossfilter.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39887"
},
{
"name": "HTML",
"bytes": "11539"
},
{
"name": "JavaScript",
"bytes": "5973"
},
{
"name": "Python",
"bytes": "269703"
}
],
"symlink_target": ""
}
|
import pkgutil
from io import StringIO
import numpy as np
import pandas as pd
from scattertext.Common import DEFAULT_BACKGROUND_SCALER_ALGO, DEFAULT_BACKGROUND_BETA
from scattertext.termscoring import ScaledFScore
class TermCategoryFrequencies(object):
'''
This class allows you to produce scatter plots of raw term frequency counts.
Occasionally, only term frequency statistics are available. This may happen in the case of very large,
lost, or proprietary data sets. `TermCategoryFrequencies` is a corpus representation,that can accept this
sort of data, along with any categorized documents that happen to be available.
Let use the [Corpus of Contemporary American English](https://corpus.byu.edu/coca/) as an example.
We'll construct a visualization
to analyze the difference between spoken American English and English that occurs in fiction.
```python
convention_df = (pd.read_excel('https://www.wordfrequency.info/files/genres_sample.xls')
.dropna()
.set_index('lemma')[['SPOKEN', 'FICTION']]
.iloc[:1000])
convention_df.head()
SPOKEN FICTION
lemma
the 3859682.0 4092394.0
I 1346545.0 1382716.0
they 609735.0 352405.0
she 212920.0 798208.0
would 233766.0 229865.0
```
Transforming this into a visualization is extremely easy. Just pass a dataframe indexed on
terms with columns indicating category-counts into the the `TermCategoryFrequencies` constructor.
```python
term_cat_freq = st.TermCategoryFrequencies(convention_df)
```
And call `produce_scattertext_explorer` normally:
```python
html = st.produce_scattertext_explorer(
term_cat_freq,
category='SPOKEN',
category_name='Spoken',
not_category_name='Fiction',
)
```
[](https://jasonkessler.github.io/demo_category_frequencies.html)
If you'd like to incorporate some documents into the visualization, you can add them into to the
`TermCategoyFrequencies` object.
First, let's extract some example Fiction and Spoken documents from the sample COCA corpus.
```python
import requests, zipfile, io
coca_sample_url = 'http://corpus.byu.edu/cocatext/samples/text.zip'
zip_file = zipfile.ZipFile(io.BytesIO(requests.get(coca_sample_url).content))
document_df = pd.DataFrame(
[{'text': zip_file.open(fn).read().decode('utf-8'),
'category': 'SPOKEN'}
for fn in zip_file.filelist if fn.filename.startswith('w_spok')][:2]
+ [{'text': zip_file.open(fn).read().decode('utf-8'),
'category': 'FICTION'}
for fn in zip_file.filelist if fn.filename.startswith('w_fic')][:2])
```
And we'll pass the `documents_df` dataframe into `TermCategoryFrequencies` via the `document_category_df`
parameter. Ensure the dataframe has two columns, 'text' and 'category'. Afterward, we can
call `produce_scattertext_explorer` (or your visualization function of choice) normally.
```python
doc_term_cat_freq = st.TermCategoryFrequencies(convention_df, document_category_df=document_df)
html = st.produce_scattertext_explorer(
doc_term_cat_freq,
category='SPOKEN',
category_name='Spoken',
not_category_name='Fiction',
)
```
'''
def __init__(self,
category_frequency_df,
document_category_df=None,
metadata_frequency_df=None,
unigram_frequency_path=None):
'''
Parameters
----------
category_frequency_df : pd.DataFrame
Index is term, columns are categories, values are counts
document_category_df : pd.DataFrame, optional
Columns are text, category. Values are text (string) and category (string)
metadata_frequency_df : pd.DataFrame, optional
Index is term, columns are categories, values are counts
unigram_frequency_path : See TermDocMatrix, optional
'''
if document_category_df is not None:
#assert set(document_category_df.columns) == set(['text', 'category'])
assert 'text' in document_category_df.columns and 'category' in document_category_df.columns
self._document_category_df = document_category_df
self.metadata_frequency_df = metadata_frequency_df
self.term_category_freq_df = category_frequency_df
self._unigram_frequency_path = unigram_frequency_path
def get_num_terms(self):
return len(self.term_category_freq_df)
def get_categories(self):
return list(self.term_category_freq_df.columns)
def get_num_metadata(self):
return len(self.metadata_frequency_df)
def get_scaled_f_scores_vs_background(self,
scaler_algo=DEFAULT_BACKGROUND_SCALER_ALGO,
beta=DEFAULT_BACKGROUND_BETA):
df = self.get_term_and_background_counts()
df['Scaled f-score'] = ScaledFScore.get_scores_for_category(
df['corpus'], df['background'], scaler_algo, beta
)
return df.sort_values(by='Scaled f-score', ascending=False)
def get_term_and_background_counts(self):
'''
Returns
-------
A pd.DataFrame consisting of unigram term counts of words occurring
in the TermDocumentMatrix and their corresponding background corpus
counts. The dataframe has two columns, corpus and background.
>>> corpus.get_unigram_corpus().get_term_and_background_counts()
corpus background
obama 702.0 565739.0
romney 570.0 695398.0
barack 248.0 227861.0
...
'''
background_df = self._get_background_unigram_frequencies()
corpus_freq_df = pd.DataFrame({'corpus': self.term_category_freq_df.sum(axis=1)})
corpus_unigram_freq = corpus_freq_df.loc[[w for w in corpus_freq_df.index if ' ' not in w]]
df = corpus_unigram_freq.join(background_df, how='outer').fillna(0)
return df
def _get_background_unigram_frequencies(self):
if self._unigram_frequency_path:
unigram_freq_table_buf = open(self._unigram_frequency_path)
else:
unigram_freq_table_buf = StringIO(pkgutil.get_data('scattertext', 'data/count_1w.txt')
.decode('utf-8'))
to_ret = (pd.read_table(unigram_freq_table_buf,
names=['word', 'background'])
.sort_values(ascending=False, by='background')
.drop_duplicates(['word'])
.set_index('word'))
return to_ret
def list_extra_features(self):
raise Exception("Not implemented in TermCategoryFrequencies")
def get_doc_indices(self):
'''
Returns
-------
np.array
Integer document indices
'''
if self._document_category_df is None:
return np.array([])
categories_d = {d: i for i, d in enumerate(self.get_categories())}
return self._document_category_df.category.apply(categories_d.get).values
def get_texts(self):
'''
Returns
-------
np.array
Texts
'''
if self._document_category_df is None:
return np.array([])
return self._document_category_df.text.values
def get_term_category_frequencies(self, scatterchartdata):
'''
Parameters
----------
scatterchartdata : ScatterChartData
Returns
-------
pd.DataFrame
'''
df = self.term_category_freq_df.rename(
columns={c: c + ' freq' for c in self.term_category_freq_df}
)
df.index.name = 'term'
return df
def apply_ranker(self, term_ranker):
'''
Parameters
----------
term_ranker : TermRanker
We'll ignore this
Returns
-------
pd.Dataframe
'''
return self.get_term_category_frequencies(None)
|
{
"content_hash": "056530a8f962ae2f22f45d1c1eddcec3",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 161,
"avg_line_length": 32.83111111111111,
"alnum_prop": 0.6925680249086232,
"repo_name": "JasonKessler/scattertext",
"id": "e8fce4c345066634f1c2b1faf8e2f018c1e62a1c",
"size": "7387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scattertext/TermCategoryFrequencies.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1035"
},
{
"name": "HTML",
"bytes": "52028"
},
{
"name": "JavaScript",
"bytes": "497904"
},
{
"name": "Python",
"bytes": "1183530"
},
{
"name": "Shell",
"bytes": "306"
}
],
"symlink_target": ""
}
|
def Execute():
pass
|
{
"content_hash": "c802c9ae7f0dcaaaf54566c803fdfbb6",
"timestamp": "",
"source": "github",
"line_count": 2,
"max_line_length": 14,
"avg_line_length": 12,
"alnum_prop": 0.5833333333333334,
"repo_name": "TeradataCenterForHadoop/ambari-presto-service",
"id": "244ed0dbac6f6997ddef2bc5ce039049b220e51b",
"size": "24",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/resource_management/core/resources/system.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1547"
},
{
"name": "Python",
"bytes": "50102"
}
],
"symlink_target": ""
}
|
import unittest2
from .test_data import * # flake8: noqa
from onfido import Api
class DummyApiRequestor(object):
def post(self, url, params, file=None):
return {
"url": url,
"params": params,
"method": "post",
"file": file
}
def get(self, url, params):
return {
"url": url,
"params": params,
"method": "get"
}
class ResourceTestCase(unittest2.TestCase):
def setup(self):
self.api = Api("", DummyApiRequestor())
# verify the interface to the API is as-expected, so just fire through
# some very simple data and so long as python doesn't barf we're all good.
class InterfaceTests(ResourceTestCase):
def test_applicants_interface(self):
self.setup()
self.api.Applicants.create(test_applicant)
self.api.Applicants.find(test_applicant_id)
self.api.Applicants.all()
self.api.Applicants.all(page=test_page_no,
per_page=test_per_page)
def test_documents_interface(self):
self.setup()
self.api.Documents.create(test_applicant_id, test_document, test_document_filename, test_document_type)
def test_checks_interface(self):
self.setup()
self.api.Checks.create(test_applicant_id, test_check)
self.api.Checks.find(test_applicant_id, test_check_id)
self.api.Checks.all(test_applicant_id)
self.api.Checks.all(test_applicant_id,
page=test_page_no,
per_page=test_per_page)
def test_reports_interface(self):
self.setup()
self.api.Reports.find(test_check_id, test_report_id)
self.api.Reports.all(test_check_id)
def test_addresspicker_interface(self):
self.setup()
self.api.AddressPicker.all(test_postcode)
# check we generate the appropriate path part of request url
# and use the correct http method
class ResourcePathAndMethodTests(ResourceTestCase):
def test_applicants_create_path_method(self):
self.setup()
result = self.api.Applicants.create(test_applicant)
self.assertEqual("applicants", result["url"])
self.assertEqual("post", result["method"])
def test_applicants_find_path_method(self):
self.setup()
result = self.api.Applicants.find(test_applicant_id)
self.assertEqual("applicants/{0}".format(test_applicant_id),
result["url"])
self.assertEqual("get", result["method"])
def test_applicants_list_path_method(self):
self.setup()
result = self.api.Applicants.all()
self.assertEqual("applicants", result["url"])
self.assertEqual("get", result["method"])
def test_applicant_list_paginate_path_method(self):
self.setup()
result = self.api.Applicants.all(page=test_page_no,
per_page=test_per_page)
self.assertEqual("applicants", result["url"])
self.assertEqual("get", result["method"])
self.assertEqual(test_page_no, result["params"]["page"])
self.assertEqual(test_per_page, result["params"]["per_page"])
def test_documents_path_method(self):
self.setup()
result = self.api.Documents.create(test_applicant_id, test_document, test_document_filename, test_document_type)
self.assertEqual(
"applicants/{0}/documents/".format(test_applicant_id),
result["url"])
self.assertEqual("post", result["method"])
def test_checks_create_path_method(self):
self.setup()
result = self.api.Checks.create(test_applicant_id,
test_check)
self.assertEqual(
"applicants/{0}/checks".format(test_applicant_id),
result["url"])
self.assertEqual("post", result["method"])
def test_checks_find_path_method(self):
self.setup()
result = self.api.Checks.find(test_applicant_id,
test_check_id)
self.assertEqual(
"applicants/{0}/checks/{1}".format(test_applicant_id,
test_check_id),
result["url"])
self.assertEqual("get", result["method"])
def test_checks_list_path_method(self):
self.setup()
result = self.api.Checks.all(test_applicant_id)
self.assertEqual(
"applicants/{0}/checks".format(test_applicant_id),
result["url"])
self.assertEqual("get", result["method"])
def test_checks_list_paginate_path_method(self):
self.setup()
result = self.api.Checks.all(test_applicant_id,
page=test_page_no,
per_page=test_per_page)
self.assertEqual(
"applicants/{0}/checks".format(test_applicant_id),
result["url"])
self.assertEqual(test_page_no, result["params"]["page"])
self.assertEqual(test_per_page, result["params"]["per_page"])
self.assertEqual("get", result["method"])
def test_reports_find_path_method(self):
self.setup()
result = self.api.Reports.find(test_check_id,
test_report_id)
self.assertEqual(
"checks/{0}/reports/{1}".format(test_check_id,
test_report_id),
result["url"])
self.assertEqual("get", result["method"])
def test_reports_list_path_method(self):
self.setup()
result = self.api.Reports.all(test_check_id)
self.assertEqual("checks/{0}/reports".format(test_check_id),
result["url"])
self.assertEqual("get", result["method"])
def test_address_list_path_method(self):
self.setup()
result = self.api.AddressPicker.all(test_postcode)
self.assertEqual("applicants/addresses/pick", result["url"])
self.assertEqual("get", result["method"])
|
{
"content_hash": "e51d9bc1426b5f0bd69dce87dd780547",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 120,
"avg_line_length": 37.574074074074076,
"alnum_prop": 0.5832101199277148,
"repo_name": "AdamStelmaszczyk/pyonfido",
"id": "4f87d2ddddffe7ad66576f37719ca21efa3b10f9",
"size": "6087",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "onfido/test/test_resources.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20961"
}
],
"symlink_target": ""
}
|
import unittest
import tempfile
import os
from calvin.utilities.calconfig import CalConfig
class TestBase(unittest.TestCase):
def setUp(self):
self.filepath = None
f, self.filepath = tempfile.mkstemp()
os.unlink(self.filepath)
self._env = os.environ
print "hej"
def tearDown(self):
if self.filepath and os.path.exists(self.filepath):
os.unlink(self.filepath)
print "da"
os.environ = self._env
class CalConfigTests(TestBase):
@unittest.skip("Currently, file is not created automatically")
def test_create_default_config(self):
self.assertEqual(os.path.exists(self.filepath), False)
os.environ['CALVIN_CONFIG_PATH'] = self.filepath
CalConfig()
self.assertEqual(os.path.exists(self.filepath), True)
with open(self.filepath, "rb") as f:
content = f.readlines()
self.assertGreater(len(content), 1)
def test_set_get(self):
self.assertEqual(os.path.exists(self.filepath), False)
print os.environ
os.environ['CALVIN_CONFIG_PATH'] = self.filepath
_conf = CalConfig()
_conf.add_section("test")
for a in range(10):
_conf.set("test", "BANAN%d" % a, str(a))
self.assertEqual(_conf.get("test", "BANAN%d" % a), str(a))
for a in range(10):
_conf.set("test", "BANAN%d" % a, a)
self.assertEqual(_conf.get("test", "BANAN%d" % a), a)
for a in range(10):
_conf.set("test", "BANAN%d" % a, range(10))
self.assertEqual(_conf.get("test", "BANAN%d" % a), range(10))
# Should this two become the same ?
for a in range(10):
_conf.set("test", "BANAN%d" % a, dict(zip(range(10), range(10))))
self.assertEqual(_conf.get("test", "BANAN%d" % a), dict(zip([str(x) for x in range(10)], range(10))))
for a in range(10):
_conf.set("test", "BANAN%d" % a, dict(zip([str(x) for x in range(10)], range(10))))
self.assertEqual(_conf.get("test", "BANAN%d" % a), dict(zip([str(x) for x in range(10)], range(10))))
for a in range(10):
_conf.set("test", "BANAN%d" % a, dict(zip([str(x) for x in range(10)], [range(10)])))
self.assertEqual(_conf.get("test", "BANAN%d" % a), dict(zip([str(x) for x in range(10)], [range(10)])))
def test_env_override(self):
self.assertEqual(os.path.exists(self.filepath), False)
os.environ['CALVIN_CONFIG_PATH'] = self.filepath
_conf = CalConfig()
_conf.add_section("test")
for a in range(10):
_conf.set("test", "APA%d" % a, "%d" % a)
os.environ['CALVIN_APA%d' % a] = "100"
self.assertEqual(_conf.get("test", "APA%d" % a), 100)
def test_lists(self):
self.assertEqual(os.path.exists(self.filepath), False)
os.environ['CALVIN_CONFIG_PATH'] = self.filepath
_conf = CalConfig()
_conf.add_section("test")
test_item = [str(x) for x in range(10)]
_conf.set("test", "BANAN2", test_item)
self.assertEqual(_conf.get("test", "BANAN2"), test_item)
def test_env_override_list_append(self):
self.assertEqual(os.path.exists(self.filepath), False)
os.environ['CALVIN_CONFIG_PATH'] = self.filepath
_conf = CalConfig()
_conf.add_section("test")
test_item = [str(x) for x in range(10)]
_conf.set("test", "KAKA", test_item)
os.environ['CALVIN_KAKA'] = "HEJ"
self.assertEqual(_conf.get("test", "KAKA"), "HEJ")
test_item = range(10)
test_item2 = ["HEJ", "hej", "HEJ"]
_conf.set("test", "KAKA", test_item)
os.environ['CALVIN_KAKA'] = os.pathsep.join(test_item2)
self.assertEqual(_conf.get("test", "KAKA"), test_item2 + test_item)
test_item = [str(x) for x in range(10)]
_conf.set("test", "KAKA", test_item)
os.environ['CALVIN_KAKA'] = '["HEJ", "hej", "HEJ"]'
self.assertEqual(_conf.get("test", "KAKA"), test_item2 + test_item)
|
{
"content_hash": "023bf6abb88545a36967285dfe1f5983",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 115,
"avg_line_length": 34.436974789915965,
"alnum_prop": 0.5671059053196681,
"repo_name": "MalmoUniversity-DA366A/calvin-base",
"id": "ee98de0eca98389cbf33ef1a6310dc0d47071393",
"size": "4703",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "calvin/utilities/tests/test_calconfig.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "36376"
},
{
"name": "JavaScript",
"bytes": "9947"
},
{
"name": "Python",
"bytes": "692250"
}
],
"symlink_target": ""
}
|
"""
Smart energy channels module for Zigbee Home Automation.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/zha/
"""
import logging
import zigpy.zcl.clusters.smartenergy as smartenergy
from homeassistant.core import callback
from .. import registries
from ..channels import AttributeListeningChannel, ZigbeeChannel
from ..const import REPORT_CONFIG_DEFAULT
_LOGGER = logging.getLogger(__name__)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.Calendar.cluster_id)
class Calendar(ZigbeeChannel):
"""Calendar channel."""
pass
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.DeviceManagement.cluster_id)
class DeviceManagement(ZigbeeChannel):
"""Device Management channel."""
pass
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.Drlc.cluster_id)
class Drlc(ZigbeeChannel):
"""Demand Response and Load Control channel."""
pass
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.EnergyManagement.cluster_id)
class EnergyManagement(ZigbeeChannel):
"""Energy Management channel."""
pass
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.Events.cluster_id)
class Events(ZigbeeChannel):
"""Event channel."""
pass
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.KeyEstablishment.cluster_id)
class KeyEstablishment(ZigbeeChannel):
"""Key Establishment channel."""
pass
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.MduPairing.cluster_id)
class MduPairing(ZigbeeChannel):
"""Pairing channel."""
pass
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.Messaging.cluster_id)
class Messaging(ZigbeeChannel):
"""Messaging channel."""
pass
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.Metering.cluster_id)
class Metering(AttributeListeningChannel):
"""Metering channel."""
REPORT_CONFIG = [{"attr": "instantaneous_demand", "config": REPORT_CONFIG_DEFAULT}]
unit_of_measure_map = {
0x00: "kW",
0x01: "m³/h",
0x02: "ft³/h",
0x03: "ccf/h",
0x04: "US gal/h",
0x05: "IMP gal/h",
0x06: "BTU/h",
0x07: "l/h",
0x08: "kPa",
0x09: "kPa",
0x0A: "mcf/h",
0x0B: "unitless",
0x0C: "MJ/s",
}
def __init__(self, cluster, device):
"""Initialize Metering."""
super().__init__(cluster, device)
self._divisor = None
self._multiplier = None
self._unit_enum = None
self._format_spec = None
async def async_configure(self):
"""Configure channel."""
await self.fetch_config(False)
await super().async_configure()
async def async_initialize(self, from_cache):
"""Initialize channel."""
await self.fetch_config(True)
await super().async_initialize(from_cache)
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute update from Metering cluster."""
super().attribute_updated(attrid, value * self._multiplier / self._divisor)
@property
def unit_of_measurement(self):
"""Return unit of measurement."""
return self.unit_of_measure_map.get(self._unit_enum & 0x7F, "unknown")
async def fetch_config(self, from_cache):
"""Fetch config from device and updates format specifier."""
self._divisor = await self.get_attribute_value("divisor", from_cache=from_cache)
self._multiplier = await self.get_attribute_value(
"multiplier", from_cache=from_cache
)
self._unit_enum = await self.get_attribute_value(
"unit_of_measure", from_cache=from_cache
)
fmting = await self.get_attribute_value(
"demand_formatting", from_cache=from_cache
)
if self._divisor is None or self._divisor == 0:
self._divisor = 1
if self._multiplier is None or self._multiplier == 0:
self._multiplier = 1
if self._unit_enum is None:
self._unit_enum = 0x7F # unknown
if fmting is None:
fmting = 0xF9 # 1 digit to the right, 15 digits to the left
r_digits = fmting & 0x07 # digits to the right of decimal point
l_digits = (fmting >> 3) & 0x0F # digits to the left of decimal point
if l_digits == 0:
l_digits = 15
width = r_digits + l_digits + (1 if r_digits > 0 else 0)
if fmting & 0x80:
self._format_spec = "{:" + str(width) + "." + str(r_digits) + "f}"
else:
self._format_spec = "{:0" + str(width) + "." + str(r_digits) + "f}"
def formatter_function(self, value):
"""Return formatted value for display."""
return self._format_spec.format(value).lstrip()
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.Prepayment.cluster_id)
class Prepayment(ZigbeeChannel):
"""Prepayment channel."""
pass
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.Price.cluster_id)
class Price(ZigbeeChannel):
"""Price channel."""
pass
@registries.ZIGBEE_CHANNEL_REGISTRY.register(smartenergy.Tunneling.cluster_id)
class Tunneling(ZigbeeChannel):
"""Tunneling channel."""
pass
|
{
"content_hash": "8b0cda12b78324e80e91e32f2bd7e992",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 88,
"avg_line_length": 28.86813186813187,
"alnum_prop": 0.6543585839360487,
"repo_name": "Cinntax/home-assistant",
"id": "8e2fa7e3d5a3f2a00f47e23900bf71b8ceb169a9",
"size": "5256",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "homeassistant/components/zha/core/channels/smartenergy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17374056"
},
{
"name": "Shell",
"bytes": "6792"
}
],
"symlink_target": ""
}
|
from collections import deque
import math
import numpy as np
from scipy import signal
class Channel:
def __init__(self, name, min, max, maxNum, offset=0.0):
self.name = name
self.min = min
self.max = max
self.num = 0
self.sum = 0
self.buffersum = 0
self.size = maxNum
self.buffer = deque(maxlen=maxNum)
self.offset = offset
self.npBufferSize = 800
self.npBufferPos = 0
self.npBuffer = np.zeros(self.npBufferSize)
self.lastVal = 0
def __repr__(self):
return "%s (%.1f-%.1f)" % (self.name, self.min, self.max)
def calibrate(self):
self.offset = -self.buffersum / min(self.size, self.num)
def smooth(self, x,beta):
window_len=50
sampleRate = 10
cutOff = 0.01
fir_coeff = signal.firwin(window_len, cutOff)
smoothed = signal.lfilter(fir_coeff, 1.0, self.npBuffer)
return smoothed
def putValue(self, value):
# deque buffer
if self.num >= self.size:
self.buffersum -= self.buffer[0]
newValue = value
self.buffersum += newValue
self.buffer.append(newValue)
self.num += 1
self.sum += newValue
"""
# numpy buffer
self.npBufferPos += 1
if self.npBufferPos >= self.npBufferSize:
self.npBufferPos = 0
self.smoothed = self.smooth(self.npBuffer, 1)
self.gradient = np.diff(self.npBuffer)
try:
self.onUpdate(self)
except:
#raise
pass
self.npBuffer[self.npBufferPos] = value
"""
# Auto Calibration
#if self.num % 100 == 0:
# self.calibrate()
def calibrate(self):
self.offset = -self.buffer[-1]
def getValue(self):
#if self.num > 0:
return self.buffer[-1] + self.offset
def getAvg(self):
return self.sum / self.num + self.offset
def getBufferAvg(self):
val = self.buffer[-1] # current value
avg = self.buffersum / min(self.size, self.num) # moving average
mix = 0.5 * val + 0.5 * avg # weighted average
dif = math.pow((val - avg) / 20, 5) # differential
rng = 0
#for i in self.buffer:
# rng = max(abs(avg-i), rng)
#return rng
return avg + self.offset
if dif > 50:
#self.buffersum = val * self.size
return val + self.offset
else:
return avg + self.offset
def getRng(self):
rng = 0
der = 0
avg = self.buffersum / min(self.size, self.num)
for i in self.buffer:
#rng = 0.01 * max(pow(avg - i, 4), rng)
der = der + pow((avg - i) / 4, 2)
#der = der + abs(avg-i)
der /= self.size
return der
def getDeriv(self):
val = self.buffer[-1] # current value
avg = self.buffersum / min(self.size, self.num) # moving average
mix = 0.5 * val + 0.5 * avg # weighted average
dif = avg - val
#dif = 5 * math.pow(dif / 20, 6) # differential
return dif
def getDiff(self):
avg = self.buffersum / min(self.size, self.num)
result = avg - self.lastVal
self.lastVal = avg
#return math.pow(result, 2)
if self.num>2:
result = self.buffer[-1] - self.buffer[-2]
else:
result = 0
return math.pow(result, 4)
|
{
"content_hash": "dca408b33017e58c82f6272784383034",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 68,
"avg_line_length": 24.75206611570248,
"alnum_prop": 0.6200333889816361,
"repo_name": "Psychedelic-Engineering/sleep-machine",
"id": "1c1650544667d5cb657eeeb3602f194105009e18",
"size": "2995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hardware/channel.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52235"
}
],
"symlink_target": ""
}
|
from pool import Pool
from server import start_server
|
{
"content_hash": "892670e1cba40c8edc33f2485e19ea6e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 31,
"avg_line_length": 14,
"alnum_prop": 0.8035714285714286,
"repo_name": "paraVerifier/paraVerifier",
"id": "4e4385144e410378c1049cb439975a0099f80c5a",
"size": "71",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "server/simpserv/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Isabelle",
"bytes": "24942387"
},
{
"name": "M",
"bytes": "12748"
},
{
"name": "Matlab",
"bytes": "13314"
},
{
"name": "OCaml",
"bytes": "194823"
},
{
"name": "Objective-C",
"bytes": "254579"
},
{
"name": "Python",
"bytes": "43559"
},
{
"name": "Shell",
"bytes": "26443"
},
{
"name": "Standard ML",
"bytes": "41715"
}
],
"symlink_target": ""
}
|
from toolbox import guild_utilities, selection_utilities
from sklearn.metrics import roc_auc_score, average_precision_score
from selection_utilities import generate_samples_from_list_without_replacement
import numpy
def get_balanced_auc(predictions_true, predictions_false, replicable = None):
if replicable is not None:
predictions_t, predictions_f = balance_predictions(predictions_true, predictions_false, n_random_negative_folds = None, replicable = replicable)
auc = get_auc(predictions_t, predictions_f)
sd = 0
values_balanced_t = []
values_balanced_f = []
else:
n_random = 100 #!
values_balanced_t = None
values_balanced_f = None
values = numpy.empty(n_random)
for i in xrange(n_random):
#predictions_t, predictions_f = balance_predictions(predictions_true, predictions_false, n_random_negative_folds = None, replicable = replicable)
predictions_t, predictions_f = balance_predictions(predictions_true, predictions_false, n_random_negative_folds = 1, replicable = replicable) #!
#predictions_t, predictions_f = predictions_true, [ numpy.mean(predictions_false) ] * len(predictions_false) #!
values[i] = get_auc(predictions_t, predictions_f)
if values_balanced_t is None or values_balanced_f is None:
values_balanced_t = numpy.array(predictions_t)
values_balanced_f = numpy.array(predictions_f)
else:
values_balanced_t += predictions_t
values_balanced_f += predictions_f
auc = numpy.mean(values)
sd = numpy.std(values)
values_balanced_t /= n_random
values_balanced_f /= n_random
return auc, sd, values_balanced_t, values_balanced_f
def get_auc(predictions_true, predictions_false):
predictions = predictions_true + predictions_false
labels = [ 1 ] * len(predictions_true) + [ 0 ] * len(predictions_false)
y_scores = numpy.array(predictions) # [0.1, 0.4, 0.35, 0.8]
y_true = numpy.array(labels) # [0, 0, 1, 1]
auc = roc_auc_score(y_true, y_scores)
#fpr, tpr, thresholds = metrics.roc_curve(y_true, y_scores, pos_label=1)
#auc = metrics.auc(fpr, tpr)
return auc
def get_auprc(predictions_true, predictions_false):
predictions = predictions_true + predictions_false
labels = [ 1 ] * len(predictions_true) + [ 0 ] * len(predictions_false)
y_scores = numpy.array(predictions)
y_true = numpy.array(labels)
auprc = average_precision_score(y_true, y_scores)
return auprc
def balance_predictions(predictions_true, predictions_false, n_random_negative_folds = None, replicable=123):
"""
n_random_negative_folds: Number of negative scores to be averaged to be assigned as negative instance.
If None calculated to cover as much as non-seed scores as possible
"""
assert len(predictions_true) != len(predictions_false)
swap = False
if len(predictions_false) < len(predictions_true):
swap = True
predictions = predictions_true
predictions_true = predictions_false
predictions_false = predictions
negative_sample_size = len(predictions_true)
negative_scores = [ 0.0 ] * negative_sample_size
n_fold = 0
for sample in generate_samples_from_list_without_replacement(predictions_false, negative_sample_size, n_random_negative_folds, replicable = replicable):
if len(sample) < negative_sample_size: # last fold
continue
n_fold += 1
for i, val in enumerate(sample):
negative_scores[i] += val
predictions_false = map(lambda x: x/n_fold, negative_scores)
if swap:
return predictions_false, predictions_true
return predictions_true, predictions_false
def create_R_script(file_name, absolute_dir, title=None, only=None, show_spread=False, vertical_average=False, append=False):
if title is not None:
plot_title = title
if append:
f = open(file_name, "a")
else:
f = open(file_name, "w")
f.write("library(ROCR)\n")
f.write("v<-read.table(\"%spredictions.dat\")\n" % absolute_dir)
f.write("l<-read.table(\"%slabels.dat\")\n" % absolute_dir)
f.write("pred<-prediction(v, l)\n")
f.write("average<-function(vals) {\n")
f.write("\tm<-matrix(sapply(vals, function(x){mean(x)}),nrow=length(vals),byrow=T)\n")
f.write("\tm[m==Inf]<-NA\n")
f.write("\tmax(colMeans(m),na.rm=T)\n")
f.write("}\n")
if only == "auc":
f.write("perfAUC<-performance(pred, \"auc\")\n")
f.write("e=c(); n=c(); x=0; for ( i in perfAUC@y.values ) { x<-x+1; e[x] <- i; n[x]<-x }\n")
if append:
f.write("sink(\"%s%s_auc.txt\", append=TRUE, split=TRUE)\n" % (absolute_dir, title))
else:
f.write("sink(\"%sauc.txt\", append=TRUE, split=TRUE)\n" % absolute_dir)
f.write("paste(format(mean(e), digits=3), format(sd(e), digits=3), sep=\" \")\n")
f.write("sink()\n")
f.close()
return
elif only == "cutoff":
if title is None:
plot_title = "Precision vs Sensitivity"
f.write("perfPPV<-performance(pred, \"ppv\")\n")
f.write("perfSens<-performance(pred, \"sens\")\n")
if vertical_average:
if show_spread:
f.write("d<-average(perfPPV@x.values)\n")
f.write("plot(perfPPV, lwd=2, col=2, ylab=\"Percentage\", main=\"%s\", avg=\"vertical\", plotCI.col=2, spread.estimate=\"stddev\", show.spread.at=seq(0,d,by=d/6))\n" % plot_title)
else:
f.write("plot(perfPPV, lwd=2, col=2, ylab=\"Percentage\", main=\"%s\", avg=\"vertical\")\n" % plot_title)
else:
f.write("plot(perfPPV, lwd=2, col=2, ylab=\"Percentage\", main=\"%s\", xlim=c(0,0.4), ylim=c(0,1))\n" % plot_title)
if vertical_average:
if show_spread:
f.write("d<-average(perfSens@x.values)\n")
f.write("plot(perfSens, lwd=2, col=3, avg=\"vertical\", plotCI.col=3, spread.estimate=\"stddev\", show.spread.at=seq(0,d,by=d/6), add=TRUE)\n")
else:
f.write("plot(perfSens, lwd=2, col=3, avg=\"vertical\", add=TRUE)\n")
else:
f.write("plot(perfSens, lwd=2, col=3, add=TRUE)\n")
f.write("perf<-performance(pred, \"prbe\")\n")
f.write("legend(\"bottomright\", c(\"Precision\", \"Sensitivity\", paste(\"(\", format(average(perf@x.values), digits=2), format(average(perf@y.values), digits=2), \")\", sep=\" \")), lty=c(1,1,0), col=c(2,3,1))\n")
f.close()
return
# ROC
f.write("perfROC<-performance(pred, \"tpr\", \"fpr\")\n")
f.write("png(\"%sroc.png\")\n" % absolute_dir)
if title is None:
plot_title = "ROC curve"
if show_spread:
f.write("plot(perfROC, lwd=2, col=2, xlab=\"False Positive Rate\", ylab=\"True Positive Rate\", main=\"%s\", avg=\"vertical\", plotCI.col=2, spread.estimate=\"stddev\", show.spread.at=seq(0,1,by=0.20))\n" % plot_title)
else:
f.write("plot(perfROC, lwd=2, col=2, xlab=\"False Positive Rate\", ylab=\"True Positive Rate\", main=\"%s\", avg=\"vertical\")\n" % plot_title)
f.write("legend(\"bottomright\", c(\"(Avg. over xval folds)\"), lty=c(1), col=c(2))\n")
f.write("dev.off()\n")
# Cutoff (PPV - Sens)
f.write("perfPPV<-performance(pred, \"ppv\")\n")
f.write("perfSens<-performance(pred, \"sens\")\n")
f.write("png(\"%scutoff.png\")\n" % absolute_dir)
if title is None:
plot_title = "Precision vs Sensitivity"
if show_spread:
f.write("d<-average(perfPPV@x.values)\n")
f.write("plot(perfPPV, lwd=2, col=2, ylab=\"Percentage\", main=\"%s\", avg=\"vertical\", plotCI.col=2, spread.estimate=\"stddev\", show.spread.at=seq(0,d,by=d/6))\n" % plot_title)
else:
f.write("plot(perfPPV, lwd=2, col=2, ylab=\"Percentage\", main=\"%s\", avg=\"vertical\")\n" % plot_title)
if show_spread:
f.write("d<-average(perfSens@x.values)\n")
f.write("plot(perfSens, lwd=2, col=3, avg=\"vertical\", plotCI.col=3, spread.estimate=\"stddev\", show.spread.at=seq(0,d,by=d/6), add=TRUE)\n")
else:
f.write("plot(perfSens, lwd=2, col=3, avg=\"vertical\", add=TRUE)\n")
f.write("perf<-performance(pred, \"prbe\")\n")
f.write("legend(\"bottomright\", c(\"Precision\", \"Sensitivity\", paste(\"(\", format(average(perf@x.values), digits=2), format(average(perf@y.values), digits=2), \")\", sep=\" \")), lty=c(1,1,0), col=c(2,3,1))\n")
f.write("dev.off()\n")
# AUC
if title is None:
plot_title = "Area Under ROC Curve (AUC)"
f.write("png(\"%sauc.png\")\n" % absolute_dir)
f.write("perfAUC<-performance(pred, \"auc\")\n")
f.write("e=c(); n=c(); x=0; for ( i in perfAUC@y.values ) { x<-x+1; e[x] <- i; n[x]<-x }; barplot(e, names=n, ylim=c(0,1),ylab= \"AUC\",xlab=\"Fold\", main=\"%s\")\n" % plot_title)
f.write("legend(\"topright\", c(paste(\"(Avg: \", format(mean(e), digits=3), \")\",sep=\"\")), lty=c(), col=c())\n")
f.write("dev.off()\n")
f.write("sink(\"%sauc.txt\", append=TRUE, split=TRUE)\n" % absolute_dir)
f.write("paste(format(mean(e), digits=3), format(sd(e), digits=3), sep=\" \")\n")
f.write("sink()\n")
f.close()
#os.system("R CMD BATCH %s" % "*.R")
return
def create_ROCR_files(list_node_scores_and_labels, file_predictions, file_labels):
"""
list_node_scores_and_labels: list of node (score, label) tuple (corresponding to each validation node) list (corresponding to xval fold)
"""
f_pred = open(file_predictions, 'w')
f_lab = open(file_labels, 'w')
firstTime = True
for i, node_scores_and_labels in enumerate(zip(*list_node_scores_and_labels)):
if i == 0:
for j in xrange(len(node_scores_and_labels)):
f_pred.write("\tFold" + str(j+1))
f_lab.write("\tFold" + str(j+1))
f_pred.write("\n")
f_lab.write("\n")
f_pred.write("%d"%(i+1))
f_lab.write("%d"%(i+1))
for (score, label) in node_scores_and_labels:
f_pred.write("\t" + str(score))
f_lab.write("\t" + str(label))
f_pred.write("\n")
f_lab.write("\n")
f_pred.close()
f_lab.close()
return
def get_validation_node_scores_and_labels(file_result, file_seed_test_scores, file_node_scores, n_random_negative_folds = None, n_negatives = None, default_score = 0, replicable = 123, candidates_file = None, previous_negative_sample_size=None):
"""
Returns a list of scores and labels [ ([0-1], [01]) ] for validation
file_result: File to parse output scores
file_seed_test_scores: File to parse test seeds
file_node_scores: File to parse all non seeds
n_negatives: Number of negative instanaces
If None the same as number of test nodes
n_random_negative_folds: Number of non-seed scores to be averaged to be assigned as negative instance
If None calculated to cover as much as non-seed scores as possible
If 0 all negative data is used
default_score: All nodes that have a higher score than this score in file_node_scores will be considered as seeds
"""
from guild_utilities import get_node_to_score, get_nodes
node_to_score = get_node_to_score(file_result)
test_nodes = get_nodes(file_seed_test_scores)
initial_to_score = get_node_to_score(file_node_scores)
non_seeds = set([ node for node, score in initial_to_score.iteritems() if score==default_score ])
node_validation_data = [ (node_to_score[node], 1) for node in test_nodes ]
if candidates_file is not None:
candidates = get_nodes(candidates_file)
node_to_score = dict([ (node, node_to_score[node]) for node in candidates ])
non_seeds = list(non_seeds & candidates)
if n_random_negative_folds == 0:
negative_sample_size = None
node_validation_data.extend([(node_to_score[node], 0) for node in set(node_to_score.keys()) & non_seeds ])
else:
n_actual_folds = 0
if n_negatives is None:
n_negatives = len(test_nodes)
negative_sample_size = n_negatives
if previous_negative_sample_size is not None:
if previous_negative_sample_size > negative_sample_size:
negative_sample_size = previous_negative_sample_size
negative_scores = [ 0 ] * negative_sample_size
non_seeds = list(non_seeds)
for sample in generate_samples_from_list_without_replacement(non_seeds, negative_sample_size, n_random_negative_folds, replicable = replicable):
for i, node in enumerate(sample):
negative_scores[i] += node_to_score[node]
n_actual_folds += 1
node_validation_data.extend(map(lambda x: (x/n_actual_folds, 0), negative_scores))
return node_validation_data, negative_sample_size
def calculate_performance_metric_counts_using_random_negatives(node_to_score, setNodeTest, non_seeds, score_threshold, n_random_negative_folds = None, replicable=123):
from selection_utilities import generate_samples_from_list_without_replacement
(nTP, nFP, nFN, nTN) = (0.0, 0.0, 0.0, 0.0)
for id, score in node_to_score.iteritems(): # if candidates based - for each candidate
if id in setNodeTest: # in the initial association file
if score >= score_threshold:
nTP += 1
else:
nFN += 1
if n_random_negative_folds == 0:
for id, score in node_to_score.iteritems():
if id in non_seeds:
if score >= score_threshold:
nFP += 1
else:
nTN += 1
else:
n_actual_folds = 0
for sample in generate_samples_from_list_without_replacement(non_seeds, len(setNodeTest), n_random_negative_folds, replicable = replicable):
setNegative = set(sample)
n_actual_folds += 1
for id, score in node_to_score.iteritems():
if id in setNegative:
if score >= score_threshold:
nFP += 1
else:
nTN += 1
nFP /= n_actual_folds
nTN /= n_actual_folds
return (nTP, nFP, nFN, nTN)
def calculatePerformance(nTP, nFP, nFN, nTN):
try:
acc = (nTP + nTN) / (nTP + nFP + nTN + nFN)
except ZeroDivisionError:
acc = None
try:
sens = nTP / (nTP + nFN)
except:
sens = None
try:
spec = nTN / (nTN + nFP)
except:
spec = None
try:
ppv = nTP / (nTP + nFP)
except:
ppv = None
#if spec is not None:
# return (sens, (1-spec))
#else:
# return (sens, None)
return (acc, sens, spec, ppv)
|
{
"content_hash": "a93749ac54b535eb1e15c4f57f2ff57c",
"timestamp": "",
"source": "github",
"line_count": 313,
"max_line_length": 245,
"avg_line_length": 44.43130990415335,
"alnum_prop": 0.6433450780182642,
"repo_name": "quimaguirre/diana",
"id": "9075327d2e2630675c777e4a53240aea9a11b75c",
"size": "13907",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "diana/toolbox/classifier_evaluation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "78309181"
},
{
"name": "Roff",
"bytes": "15999246"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
]
|
{
"content_hash": "db17aaeee48a608fdee30d861a30fed5",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 114,
"avg_line_length": 20.8,
"alnum_prop": 0.5745192307692307,
"repo_name": "ejpreciado/superlists",
"id": "a419fa172d616e46398a387f6bf38194e0da59e5",
"size": "489",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lists/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7896"
},
{
"name": "HTML",
"bytes": "2877"
},
{
"name": "JavaScript",
"bytes": "139143"
},
{
"name": "Python",
"bytes": "33882"
}
],
"symlink_target": ""
}
|
"""
Copyright (c) Microsoft Open Technologies (Shanghai) Co. Ltd. All rights reserved.
The MIT License (MIT)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
__author__ = 'Yifu Huang'
|
{
"content_hash": "5568f0f41a60be5d08f68af770b44933",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 83,
"avg_line_length": 48.666666666666664,
"alnum_prop": 0.7893835616438356,
"repo_name": "frankyao47/open-hackathon",
"id": "f260be7ba15ac6814279aff42c3eb9b5534380ba",
"size": "1193",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "open-hackathon-server/src/hackathon/azureformation/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "160619"
},
{
"name": "HTML",
"bytes": "525535"
},
{
"name": "Java",
"bytes": "9272"
},
{
"name": "JavaScript",
"bytes": "566202"
},
{
"name": "Python",
"bytes": "2492494"
},
{
"name": "Ruby",
"bytes": "1518308"
},
{
"name": "Shell",
"bytes": "21536"
}
],
"symlink_target": ""
}
|
"""
This module contains the 'email_address' menu node.
"""
from random import choice
from textwrap import dedent
from services import email
from typeclasses.players import Player
def email_address(caller, input):
"""Prompt the user to enter a valid email address."""
text = ""
options = (
{
"key": "b",
"desc": "Go back to the login screen.",
"goto": "start",
},
{
"key": "_default",
"desc": "Enter a valid e-mail address.",
"goto": "email_address",
},
)
email_address = input.strip()
player = caller.db._player
# Search for players with an identical e-mail address
identical = list(Player.objects.filter(email=email_address))
if player in identical:
identical.remove(player)
if not email.is_email_address(email_address):
# The e-mail address doesn't seem to be valid
text = dedent("""
|rSorry, the specified e-mail address {} cannot be
accepted as a valid one.|n You can:
Type |yb|n to return to the login screen.
Or enter another e-mail address.
""".strip("\n")).format(email_address)
elif identical:
# The e-mail address is already used
text = dedent("""
|rThe e-mail address you have entered is already being used
by another account. You can either:
Type |yb|n to return to the login screen.
Or enter another e-mail address.
""".strip("\n"))
else:
player.email = email_address
player.save()
# Generates the 4-digit validation code
numbers = "012345678"
code = ""
for i in range(4):
code += choice(numbers)
# Sends an e-mail with the code
subject = "Account validation"
body = dedent("""
You have successfully created the account {} on that MUD.
In order to validate it and begin to play, you need to
enter the following four-digit code in your MUD client.
If you have been disconnected, just login again, entering
your account's name and password, the validation screen
will be displayed.
Four-digit code: {}
""".strip("\n")).format(player.name, code)
recipent = email_address
error = "The account {}'s validation code is {}.".format(
player.name, code)
player.db.valid = False
player.db.validation_code = code
email.send("team@no-host.com", recipent, subject, body, error)
text = dedent("""
An email has been sent to {}. It contains your validation
code which you'll need to finish creating your account.
If you haven't received the validation e-mail after some
minutes have passed, check your spam folder to see if
it's inside. If not, you might try to select
another e-mail address, or contact an administrator.
From here you can:
Type |yb|n to choose a different e-mail address.
Enter your received confirmation code.
""".strip("\n")).format(email_address)
options = (
{
"key": "b",
"desc": "Go back to the e-mail address selection.",
"goto": "email_address",
},
{
"key": "_default",
"desc": "Enter your validation code.",
"goto": "validate_account",
},
)
return text, options
def text_email_address(player):
"""Return the text for the email-address menu node."""
text = dedent("""
Enter a valid e-mail address for the account {}.
An e-mail confirmation will be sent to this address with a
four-digit code that you will have to enter to validate this
account. This e-mail address will be used only by the staff,
if needed. You will be able to update this e-mail address if
desired.
Enter your email address.
""".strip("\n")).format(player.name)
return text
|
{
"content_hash": "6db71eaf69e154cac6c764fea8c39cb3",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 71,
"avg_line_length": 34.26829268292683,
"alnum_prop": 0.5655990510083037,
"repo_name": "vlegoff/mud",
"id": "e5ecfeb1ba9bbfba55403235471f033165dcb415",
"size": "4217",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "menu/email_address.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "98485"
}
],
"symlink_target": ""
}
|
import collections
import os
import re
import subprocess
import base64
import os.path as osp
import pickle as pickle
import inspect
import hashlib
import sys
from contextlib import contextmanager
import errno
from io import StringIO
import datetime
import dateutil.tz
import json
import time
import numpy as np
from rllab.misc import ext
from rllab.misc.ext import AttrDict
from rllab.viskit.core import flatten
from rllab.core.serializable import Serializable
from rllab.misc.console import mkdir_p
from pytorchrl import config
class StubBase(object):
def __getitem__(self, item):
return StubMethodCall(self, "__getitem__", args=[item], kwargs=dict())
def __getattr__(self, item):
try:
return super(self.__class__, self).__getattribute__(item)
except AttributeError:
if item.startswith("__") and item.endswith("__"):
raise
return StubAttr(self, item)
def __pow__(self, power, modulo=None):
return StubMethodCall(self, "__pow__", [power, modulo], dict())
def __call__(self, *args, **kwargs):
return StubMethodCall(self.obj, self.attr_name, args, kwargs)
def __add__(self, other):
return StubMethodCall(self, "__add__", [other], dict())
def __rmul__(self, other):
return StubMethodCall(self, "__rmul__", [other], dict())
def __div__(self, other):
return StubMethodCall(self, "__div__", [other], dict())
def __rdiv__(self, other):
return StubMethodCall(BinaryOp(), "rdiv", [self, other], dict()) # self, "__rdiv__", [other], dict())
def __rpow__(self, power, modulo=None):
return StubMethodCall(self, "__rpow__", [power, modulo], dict())
class BinaryOp(Serializable):
def __init__(self):
Serializable.quick_init(self, locals())
def rdiv(self, a, b):
return b / a
# def __init__(self, opname, a, b):
# self.opname = opname
# self.a = a
# self.b = b
class StubAttr(StubBase):
def __init__(self, obj, attr_name):
self.__dict__["_obj"] = obj
self.__dict__["_attr_name"] = attr_name
@property
def obj(self):
return self.__dict__["_obj"]
@property
def attr_name(self):
return self.__dict__["_attr_name"]
def __str__(self):
return "StubAttr(%s, %s)" % (str(self.obj), str(self.attr_name))
class StubMethodCall(StubBase, Serializable):
def __init__(self, obj, method_name, args, kwargs):
self._serializable_initialized = False
Serializable.quick_init(self, locals())
self.obj = obj
self.method_name = method_name
self.args = args
self.kwargs = kwargs
def __str__(self):
return "StubMethodCall(%s, %s, %s, %s)" % (
str(self.obj), str(self.method_name), str(self.args), str(self.kwargs))
class StubClass(StubBase):
def __init__(self, proxy_class):
self.proxy_class = proxy_class
def __call__(self, *args, **kwargs):
if len(args) > 0:
# Convert the positional arguments to keyword arguments
spec = inspect.getargspec(self.proxy_class.__init__)
kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
args = tuple()
return StubObject(self.proxy_class, *args, **kwargs)
def __getstate__(self):
return dict(proxy_class=self.proxy_class)
def __setstate__(self, dict):
self.proxy_class = dict["proxy_class"]
def __getattr__(self, item):
if hasattr(self.proxy_class, item):
return StubAttr(self, item)
raise AttributeError
def __str__(self):
return "StubClass(%s)" % self.proxy_class
class StubObject(StubBase):
def __init__(self, __proxy_class, *args, **kwargs):
if len(args) > 0:
spec = inspect.getargspec(__proxy_class.__init__)
kwargs = dict(list(zip(spec.args[1:], args)), **kwargs)
args = tuple()
self.proxy_class = __proxy_class
self.args = args
self.kwargs = kwargs
def __getstate__(self):
return dict(args=self.args, kwargs=self.kwargs, proxy_class=self.proxy_class)
def __setstate__(self, dict):
self.args = dict["args"]
self.kwargs = dict["kwargs"]
self.proxy_class = dict["proxy_class"]
def __getattr__(self, item):
# why doesnt the commented code work?
# return StubAttr(self, item)
# checks bypassed to allow for accesing instance fileds
if hasattr(self.proxy_class, item):
return StubAttr(self, item)
raise AttributeError('Cannot get attribute %s from %s' % (item, self.proxy_class))
def __str__(self):
return "StubObject(%s, *%s, **%s)" % (str(self.proxy_class), str(self.args), str(self.kwargs))
class VariantDict(AttrDict):
def __init__(self, d, hidden_keys):
super(VariantDict, self).__init__(d)
self._hidden_keys = hidden_keys
def dump(self):
return {k: v for k, v in self.items() if k not in self._hidden_keys}
class VariantGenerator(object):
"""
Usage:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", ['x', 'y'])
vg.variants() => # all combinations of [1,2,3] x ['x','y']
Supports noncyclic dependency among parameters:
vg = VariantGenerator()
vg.add("param1", [1, 2, 3])
vg.add("param2", lambda param1: [param1+1, param1+2])
vg.variants() => # ..
"""
def __init__(self):
self._variants = []
self._populate_variants()
self._hidden_keys = []
for k, vs, cfg in self._variants:
if cfg.get("hide", False):
self._hidden_keys.append(k)
def add(self, key, vals, **kwargs):
self._variants.append((key, vals, kwargs))
def _populate_variants(self):
methods = inspect.getmembers(
self.__class__, predicate=lambda x: inspect.isfunction(x) or inspect.ismethod(x))
methods = [x[1].__get__(self, self.__class__)
for x in methods if getattr(x[1], '__is_variant', False)]
for m in methods:
self.add(m.__name__, m, **getattr(m, "__variant_config", dict()))
def variants(self, randomized=False):
ret = list(self.ivariants())
if randomized:
np.random.shuffle(ret)
return list(map(self.variant_dict, ret))
def variant_dict(self, variant):
return VariantDict(variant, self._hidden_keys)
def to_name_suffix(self, variant):
suffix = []
for k, vs, cfg in self._variants:
if not cfg.get("hide", False):
suffix.append(k + "_" + str(variant[k]))
return "_".join(suffix)
def ivariants(self):
dependencies = list()
for key, vals, _ in self._variants:
if hasattr(vals, "__call__"):
args = inspect.getargspec(vals).args
if hasattr(vals, 'im_self') or hasattr(vals, "__self__"):
# remove the first 'self' parameter
args = args[1:]
dependencies.append((key, set(args)))
else:
dependencies.append((key, set()))
sorted_keys = []
# topo sort all nodes
while len(sorted_keys) < len(self._variants):
# get all nodes with zero in-degree
free_nodes = [k for k, v in dependencies if len(v) == 0]
if len(free_nodes) == 0:
error_msg = "Invalid parameter dependency: \n"
for k, v in dependencies:
if len(v) > 0:
error_msg += k + " depends on " + " & ".join(v) + "\n"
raise ValueError(error_msg)
dependencies = [(k, v)
for k, v in dependencies if k not in free_nodes]
# remove the free nodes from the remaining dependencies
for _, v in dependencies:
v.difference_update(free_nodes)
sorted_keys += free_nodes
return self._ivariants_sorted(sorted_keys)
def _ivariants_sorted(self, sorted_keys):
if len(sorted_keys) == 0:
yield dict()
else:
first_keys = sorted_keys[:-1]
first_variants = self._ivariants_sorted(first_keys)
last_key = sorted_keys[-1]
last_vals = [v for k, v, _ in self._variants if k == last_key][0]
if hasattr(last_vals, "__call__"):
last_val_keys = inspect.getargspec(last_vals).args
if hasattr(last_vals, 'im_self') or hasattr(last_vals, '__self__'):
last_val_keys = last_val_keys[1:]
else:
last_val_keys = None
for variant in first_variants:
if hasattr(last_vals, "__call__"):
last_variants = last_vals(
**{k: variant[k] for k in last_val_keys})
for last_choice in last_variants:
yield AttrDict(variant, **{last_key: last_choice})
else:
for last_choice in last_vals:
yield AttrDict(variant, **{last_key: last_choice})
def variant(*args, **kwargs):
def _variant(fn):
fn.__is_variant = True
fn.__variant_config = kwargs
return fn
if len(args) == 1 and isinstance(args[0], collections.Callable):
return _variant(args[0])
return _variant
def stub(glbs):
# replace the __init__ method in all classes
# hacky!!!
for k, v in list(glbs.items()):
# look at all variables that are instances of a class (not yet Stub)
if isinstance(v, type) and v != StubClass:
glbs[k] = StubClass(v) # and replaces them by a the same but Stub
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
exp_count = 0
now = datetime.datetime.now(dateutil.tz.tzlocal())
timestamp = now.strftime('%Y_%m_%d_%H_%M_%S')
remote_confirmed = False
def run_experiment_lite(
stub_method_call=None,
batch_tasks=None,
exp_prefix="experiment",
exp_name=None,
log_dir=None,
script="scripts/run_experiment_lite.py",
python_command="python",
mode="local",
dry=False,
docker_image=None,
aws_config=None,
env=None,
variant=None,
use_gpu=False,
sync_s3_pkl=False,
sync_s3_png=False,
sync_s3_log=False,
sync_log_on_termination=True,
confirm_remote=True,
terminate_machine=True,
periodic_sync=True,
periodic_sync_interval=15,
sync_all_data_node_to_s3=True,
use_cloudpickle=None,
pre_commands=None,
added_project_directories=[],
**kwargs):
"""
Serialize the stubbed method call and run the experiment using the specified mode.
:param stub_method_call: A stubbed method call.
:param script: The name of the entrance point python script
:param mode: Where & how to run the experiment. Should be one of "local", "local_docker", "ec2",
and "lab_kube".
:param dry: Whether to do a dry-run, which only prints the commands without executing them.
:param exp_prefix: Name prefix for the experiments
:param docker_image: name of the docker image. Ignored if using local mode.
:param aws_config: configuration for AWS. Only used under EC2 mode
:param env: extra environment variables
:param kwargs: All other parameters will be passed directly to the entrance python script.
:param variant: If provided, should be a dictionary of parameters
:param use_gpu: Whether the launched task is running on GPU. This triggers a few configuration changes including
certain environment flags
:param sync_s3_pkl: Whether to sync pkl files during execution of the experiment (they will always be synced at
the end of the experiment)
:param sync_s3_png: Whether to sync png files during execution of the experiment (they will always be synced at
the end of the experiment)
:param sync_s3_log: Whether to sync log files during execution of the experiment (they will always be synced at
the end of the experiment)
:param confirm_remote: Whether to confirm before launching experiments remotely
:param terminate_machine: Whether to terminate machine after experiment finishes. Only used when using
mode="ec2". This is useful when one wants to debug after an experiment finishes abnormally.
:param periodic_sync: Whether to synchronize certain experiment files periodically during execution.
:param periodic_sync_interval: Time interval between each periodic sync, in seconds.
"""
assert stub_method_call is not None or batch_tasks is not None, "Must provide at least either stub_method_call or batch_tasks"
if use_cloudpickle is None:
for maybe_stub in (batch_tasks or [stub_method_call]):
# decide mode
if isinstance(maybe_stub, StubBase):
use_cloudpickle = False
else:
assert hasattr(maybe_stub, '__call__')
use_cloudpickle = True
# ensure variant exists
if variant is None:
variant = dict()
if batch_tasks is None:
batch_tasks = [
dict(
kwargs,
pre_commands=pre_commands,
stub_method_call=stub_method_call,
exp_name=exp_name,
log_dir=log_dir,
env=env,
variant=variant,
use_cloudpickle=use_cloudpickle
)
]
global exp_count
global remote_confirmed
config.USE_GPU = use_gpu
# params_list = []
for task in batch_tasks:
call = task.pop("stub_method_call")
if use_cloudpickle:
import cloudpickle
data = base64.b64encode(cloudpickle.dumps(call)).decode("utf-8")
else:
data = base64.b64encode(pickle.dumps(call)).decode("utf-8")
task["args_data"] = data
exp_count += 1
params = dict(kwargs)
if task.get("exp_name", None) is None:
task["exp_name"] = "%s_%s_%04d" % (
exp_prefix, timestamp, exp_count)
if task.get("log_dir", None) is None:
task["log_dir"] = config.LOG_DIR + "/local/" + \
exp_prefix.replace("_", "-") + "/" + task["exp_name"]
if task.get("variant", None) is not None:
variant = task.pop("variant")
if "exp_name" not in variant:
variant["exp_name"] = task["exp_name"]
task["variant_data"] = base64.b64encode(pickle.dumps(variant)).decode("utf-8")
elif "variant" in task:
del task["variant"]
task["remote_log_dir"] = osp.join(
config.AWS_S3_PATH, exp_prefix.replace("_", "-"), task["exp_name"])
task["env"] = task.get("env", dict()) or dict()
task["env"]["RLLAB_USE_GPU"] = str(use_gpu)
if mode not in ["local", "local_docker"] and not remote_confirmed and not dry and confirm_remote:
remote_confirmed = query_yes_no(
"Running in (non-dry) mode %s. Confirm?" % mode)
if not remote_confirmed:
sys.exit(1)
if hasattr(mode, "__call__"):
if docker_image is None:
docker_image = config.DOCKER_IMAGE
mode(
task,
docker_image=docker_image,
use_gpu=use_gpu,
exp_prefix=exp_prefix,
script=script,
python_command=python_command,
sync_s3_pkl=sync_s3_pkl,
sync_log_on_termination=sync_log_on_termination,
periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval,
sync_all_data_node_to_s3=sync_all_data_node_to_s3,
)
elif mode == "local":
for task in batch_tasks:
del task["remote_log_dir"]
env = task.pop("env", None)
command = to_local_command(
task,
python_command=python_command,
script=osp.join(config.PROJECT_PATH, script),
use_gpu=use_gpu
)
print(command)
if dry:
return
try:
if env is None:
env = dict()
subprocess.call(
command, shell=True, env=dict(os.environ, **env))
except Exception as e:
print(e)
if isinstance(e, KeyboardInterrupt):
raise
elif mode == "local_docker":
if docker_image is None:
docker_image = config.DOCKER_IMAGE
for task in batch_tasks:
del task["remote_log_dir"]
env = task.pop("env", None)
command = to_docker_command(
task, # these are the params. Pre and Post command can be here
docker_image=docker_image,
script=script,
env=env,
use_gpu=use_gpu,
use_tty=True,
python_command=python_command,
)
print(command)
if dry:
return
p = subprocess.Popen(command, shell=True)
try:
p.wait()
except KeyboardInterrupt:
try:
print("terminating")
p.terminate()
except OSError:
print("os error!")
pass
p.wait()
elif mode == "ec2":
if docker_image is None:
docker_image = config.DOCKER_IMAGE
s3_code_path = s3_sync_code(config, dry=dry, added_project_directories=added_project_directories)
launch_ec2(batch_tasks,
exp_prefix=exp_prefix,
docker_image=docker_image,
python_command=python_command,
script=script,
aws_config=aws_config,
dry=dry,
terminate_machine=terminate_machine,
use_gpu=use_gpu,
code_full_path=s3_code_path,
sync_s3_pkl=sync_s3_pkl,
sync_s3_png=sync_s3_png,
sync_s3_log=sync_s3_log,
sync_log_on_termination=sync_log_on_termination,
periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval)
elif mode == "lab_kube":
# assert env is None
# first send code folder to s3
s3_code_path = s3_sync_code(config, dry=dry)
if docker_image is None:
docker_image = config.DOCKER_IMAGE
for task in batch_tasks:
# if 'env' in task:
# assert task.pop('env') is None
# TODO: dangerous when there are multiple tasks?
task["resources"] = params.pop(
"resources", config.KUBE_DEFAULT_RESOURCES)
task["node_selector"] = params.pop(
"node_selector", config.KUBE_DEFAULT_NODE_SELECTOR)
task["exp_prefix"] = exp_prefix
pod_dict = to_lab_kube_pod(
task, code_full_path=s3_code_path, docker_image=docker_image, script=script, is_gpu=use_gpu,
python_command=python_command,
sync_s3_pkl=sync_s3_pkl, periodic_sync=periodic_sync,
periodic_sync_interval=periodic_sync_interval,
sync_all_data_node_to_s3=sync_all_data_node_to_s3,
terminate_machine=terminate_machine,
)
pod_str = json.dumps(pod_dict, indent=1)
if dry:
print(pod_str)
dir = "{pod_dir}/{exp_prefix}".format(
pod_dir=config.POD_DIR, exp_prefix=exp_prefix)
ensure_dir(dir)
fname = "{dir}/{exp_name}.json".format(
dir=dir,
exp_name=task["exp_name"]
)
with open(fname, "w") as fh:
fh.write(pod_str)
kubecmd = "kubectl create -f %s" % fname
print(kubecmd)
if dry:
return
retry_count = 0
wait_interval = 1
while retry_count <= 5:
try:
return_code = subprocess.call(kubecmd, shell=True)
if return_code == 0:
break
retry_count += 1
print("trying again...")
time.sleep(wait_interval)
except Exception as e:
if isinstance(e, KeyboardInterrupt):
raise
print(e)
else:
raise NotImplementedError
_find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search
def ensure_dir(dirname):
"""
Ensure that a named directory exists; if it does not, attempt to create it.
"""
try:
os.makedirs(dirname)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def _shellquote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
def _to_param_val(v):
if v is None:
return ""
elif isinstance(v, list):
return " ".join(map(_shellquote, list(map(str, v))))
else:
return _shellquote(str(v))
def to_local_command(params, python_command="python", script=osp.join(config.PROJECT_PATH,
'scripts/run_experiment.py'),
use_gpu=False):
command = python_command + " " + script
if use_gpu and not config.USE_TF:
command = "THEANO_FLAGS='device=gpu,dnn.enabled=auto,floatX=float32' " + command
for k, v in config.ENV.items():
command = ("%s=%s " % (k, v)) + command
pre_commands = params.pop("pre_commands", None)
post_commands = params.pop("post_commands", None)
if pre_commands is not None or post_commands is not None:
print("Not executing the pre_commands: ", pre_commands, ", nor post_commands: ", post_commands)
for k, v in params.items():
if isinstance(v, dict):
for nk, nv in v.items():
if str(nk) == "_name":
command += " --%s %s" % (k, _to_param_val(nv))
else:
command += \
" --%s_%s %s" % (k, nk, _to_param_val(nv))
else:
command += " --%s %s" % (k, _to_param_val(v))
return command
def to_docker_command(params, docker_image, python_command="python", script='scripts/run_experiment_lite.py',
pre_commands=None, use_tty=False,
mujoco_path=None,
post_commands=None, dry=False, use_gpu=False, env=None, local_code_dir=None):
"""
:param params: The parameters for the experiment. If logging directory parameters are provided, we will create
docker volume mapping to make sure that the logging files are created at the correct locations
:param docker_image: docker image to run the command on
:param script: script command for running experiment
:return:
"""
log_dir = params.get("log_dir")
docker_args = params.pop("docker_args", "")
if pre_commands is None:
pre_commands = params.pop("pre_commands", None)
if post_commands is None:
post_commands = params.pop("post_commands", None)
if mujoco_path is None:
mujoco_path = config.MUJOCO_KEY_PATH
# script = 'rllab/' + script
# if not dry:
# create volume for logging directory
if use_gpu:
command_prefix = "nvidia-docker run"
else:
command_prefix = "docker run"
docker_log_dir = config.DOCKER_LOG_DIR
if env is None:
env = dict()
env = dict(
env,
AWS_ACCESS_KEY_ID=config.AWS_ACCESS_KEY,
AWS_SECRET_ACCESS_KEY=config.AWS_ACCESS_SECRET,
)
if env is not None:
for k, v in env.items():
command_prefix += " -e \"{k}={v}\"".format(k=k, v=v)
command_prefix += " -v {local_mujoco_key_dir}:{docker_mujoco_key_dir}".format(
local_mujoco_key_dir=mujoco_path, docker_mujoco_key_dir='/root/.mujoco')
command_prefix += " -v {local_log_dir}:{docker_log_dir}".format(
local_log_dir=log_dir,
docker_log_dir=docker_log_dir
)
command_prefix += docker_args
if local_code_dir is None:
local_code_dir = config.PROJECT_PATH
command_prefix += " -v {local_code_dir}:{docker_code_dir}".format(
local_code_dir=local_code_dir,
docker_code_dir=config.DOCKER_CODE_DIR
)
params = dict(params, log_dir=docker_log_dir)
if use_tty:
command_prefix += " -ti " + docker_image + " /bin/bash -c "
else:
command_prefix += " -i " + docker_image + " /bin/bash -c "
command_list = list()
if pre_commands is not None:
command_list.extend(pre_commands)
command_list.append("echo \"Running in docker\"")
command_list.append(to_local_command(
params, python_command=python_command, script=osp.join(config.DOCKER_CODE_DIR, script), use_gpu=use_gpu))
# We for 2 min sleep after termination to allow for last syncs.
if post_commands is None:
post_commands = ['sleep 120']
command_list.extend(post_commands)
return command_prefix + "'" + "; ".join(command_list) + "'"
def dedent(s):
lines = [l.strip() for l in s.split('\n')]
return '\n'.join(lines)
def launch_ec2(params_list, exp_prefix, docker_image, code_full_path,
python_command="python",
script='scripts/run_experiment.py',
aws_config=None, dry=False, terminate_machine=True, use_gpu=False, sync_s3_pkl=False,
sync_s3_png=False,
sync_s3_log=False,
sync_log_on_termination=True,
periodic_sync=True, periodic_sync_interval=15):
if len(params_list) == 0:
return
default_config = dict(
image_id=config.AWS_IMAGE_ID,
instance_type=config.AWS_INSTANCE_TYPE,
key_name=config.AWS_KEY_NAME,
spot=config.AWS_SPOT,
spot_price=config.AWS_SPOT_PRICE,
iam_instance_profile_name=config.AWS_IAM_INSTANCE_PROFILE_NAME,
security_groups=config.AWS_SECURITY_GROUPS,
security_group_ids=config.AWS_SECURITY_GROUP_IDS,
network_interfaces=config.AWS_NETWORK_INTERFACES,
)
if aws_config is None:
aws_config = dict()
aws_config = dict(default_config, **aws_config)
sio = StringIO()
sio.write("#!/bin/bash\n")
sio.write("{\n")
sio.write("""
die() { status=$1; shift; echo "FATAL: $*"; exit $status; }
""")
# Get the meta data about the instance
# http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-instance-metadata.html
sio.write("""
EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id`"
""")
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}
""".format(exp_name=params_list[0].get("exp_name"), aws_region=config.AWS_REGION_NAME))
if config.LABEL:
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=owner,Value={label} --region {aws_region}
""".format(label=config.LABEL, aws_region=config.AWS_REGION_NAME))
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=exp_prefix,Value={exp_prefix} --region {aws_region}
""".format(exp_prefix=exp_prefix, aws_region=config.AWS_REGION_NAME))
sio.write("""
service docker start
""")
sio.write("""
docker --config /home/ubuntu/.docker pull {docker_image}
""".format(docker_image=docker_image))
sio.write("""
export AWS_DEFAULT_REGION={aws_region}
""".format(aws_region=config.AWS_REGION_NAME))
if config.FAST_CODE_SYNC:
# sio.write("""
# aws s3 cp {code_full_path} /tmp/rllab_code.tar.gz --region {aws_region}
# """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
# aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp {code_full_path} /tmp/pytorchrl_code.tar.gz
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))
sio.write("""
mkdir -p {local_code_path}
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
aws_region=config.AWS_REGION_NAME))
sio.write("""
tar -zxvf /tmp/pytorchrl_code.tar.gz -C {local_code_path}
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
aws_region=config.AWS_REGION_NAME))
else:
# sio.write("""
# aws s3 cp --recursive {code_full_path} {local_code_path} --region {aws_region}
# """.format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR,
# aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp --recursive {code_full_path} {local_code_path}
""".format(code_full_path=code_full_path, local_code_path=config.DOCKER_CODE_DIR))
s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'
# sio.write("""
# aws s3 cp --recursive {} {} --region {}
# """.format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH, config.AWS_REGION_NAME))
sio.write("""
aws s3 cp --recursive {} {}
""".format(s3_mujoco_key_path, config.MUJOCO_KEY_PATH))
sio.write("""
cd {local_code_path}
""".format(local_code_path=config.DOCKER_CODE_DIR))
for params in params_list:
log_dir = params.get("log_dir")
remote_log_dir = params.pop("remote_log_dir")
env = params.pop("env", None)
sio.write("""
aws ec2 create-tags --resources $EC2_INSTANCE_ID --tags Key=Name,Value={exp_name} --region {aws_region}
""".format(exp_name=params.get("exp_name"), aws_region=config.AWS_REGION_NAME))
sio.write("""
mkdir -p {log_dir}
""".format(log_dir=log_dir))
if periodic_sync:
include_png = " --include '*.png' " if sync_s3_png else " "
include_pkl = " --include '*.pkl' " if sync_s3_pkl else " "
include_log = " --include '*.log' " if sync_s3_log else " "
# sio.write("""
# while /bin/true; do
# aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region}
# sleep {periodic_sync_interval}
# done & echo sync initiated""".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,
# log_dir=log_dir, remote_log_dir=remote_log_dir,
# aws_region=config.AWS_REGION_NAME,
# periodic_sync_interval=periodic_sync_interval))
sio.write("""
while /bin/true; do
aws s3 sync --exclude '*' {include_png} {include_pkl} {include_log}--include '*.csv' --include '*.json' {log_dir} {remote_log_dir}
sleep {periodic_sync_interval}
done & echo sync initiated""".format(include_png=include_png, include_pkl=include_pkl, include_log=include_log,
log_dir=log_dir, remote_log_dir=remote_log_dir,
periodic_sync_interval=periodic_sync_interval))
if sync_log_on_termination:
# sio.write("""
# while /bin/true; do
# if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \ -f 2) ]
# then
# logger "Running shutdown hook."
# aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}
# aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}
# break
# else
# # Spot instance not yet marked for termination.
# sleep 5
# fi
# done & echo log sync initiated
# """.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
sio.write("""
while /bin/true; do
if [ -z $(curl -Is http://169.254.169.254/latest/meta-data/spot/termination-time | head -1 | grep 404 | cut -d \ -f 2) ]
then
logger "Running shutdown hook."
aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log
aws s3 cp --recursive {log_dir} {remote_log_dir}
break
else
# Spot instance not yet marked for termination.
sleep 5
fi
done & echo log sync initiated
""".format(log_dir=log_dir, remote_log_dir=remote_log_dir))
if use_gpu:
sio.write("""
for i in {1..800}; do su -c "nvidia-modprobe -u -c=0" ubuntu && break || sleep 3; done
systemctl start nvidia-docker
""")
sio.write("""
{command}
""".format(command=to_docker_command(params, docker_image, python_command=python_command, script=script,
use_gpu=use_gpu, env=env,
local_code_dir=config.DOCKER_CODE_DIR)))
# sio.write("""
# aws s3 cp --recursive {log_dir} {remote_log_dir} --region {aws_region}
# """.format(log_dir=log_dir, remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp --recursive {log_dir} {remote_log_dir}
""".format(log_dir=log_dir, remote_log_dir=remote_log_dir))
# sio.write("""
# aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log --region {aws_region}
# """.format(remote_log_dir=remote_log_dir, aws_region=config.AWS_REGION_NAME))
sio.write("""
aws s3 cp /home/ubuntu/user_data.log {remote_log_dir}/stdout.log
""".format(remote_log_dir=remote_log_dir))
if terminate_machine:
sio.write("""
EC2_INSTANCE_ID="`wget -q -O - http://169.254.169.254/latest/meta-data/instance-id || die \"wget instance-id has failed: $?\"`"
aws ec2 terminate-instances --instance-ids $EC2_INSTANCE_ID --region {aws_region}
""".format(aws_region=config.AWS_REGION_NAME))
sio.write("} >> /home/ubuntu/user_data.log 2>&1\n")
full_script = dedent(sio.getvalue())
import boto3
import botocore
if aws_config["spot"]:
ec2 = boto3.client(
"ec2",
region_name=config.AWS_REGION_NAME,
aws_access_key_id=config.AWS_ACCESS_KEY,
aws_secret_access_key=config.AWS_ACCESS_SECRET,
)
else:
ec2 = boto3.resource(
"ec2",
region_name=config.AWS_REGION_NAME,
aws_access_key_id=config.AWS_ACCESS_KEY,
aws_secret_access_key=config.AWS_ACCESS_SECRET,
)
if len(full_script) > 10000 or len(base64.b64encode(full_script.encode()).decode("utf-8")) > 10000:
# Script too long; need to upload script to s3 first.
# We're being conservative here since the actual limit is 16384 bytes
s3_path = upload_file_to_s3(full_script)
sio = StringIO()
sio.write("#!/bin/bash\n")
sio.write("""
aws s3 cp {s3_path} /home/ubuntu/remote_script.sh --region {aws_region} && \\
chmod +x /home/ubuntu/remote_script.sh && \\
bash /home/ubuntu/remote_script.sh
""".format(s3_path=s3_path, aws_region=config.AWS_REGION_NAME))
user_data = dedent(sio.getvalue())
else:
user_data = full_script
print(full_script)
with open("/tmp/full_script", "w") as f:
f.write(full_script)
instance_args = dict(
ImageId=aws_config["image_id"],
KeyName=aws_config["key_name"],
UserData=user_data,
InstanceType=aws_config["instance_type"],
EbsOptimized=config.EBS_OPTIMIZED,
SecurityGroups=aws_config["security_groups"],
SecurityGroupIds=aws_config["security_group_ids"],
NetworkInterfaces=aws_config["network_interfaces"],
IamInstanceProfile=dict(
Name=aws_config["iam_instance_profile_name"],
),
**config.AWS_EXTRA_CONFIGS,
)
if len(instance_args["NetworkInterfaces"]) > 0:
# disable_security_group = query_yes_no(
# "Cannot provide both network interfaces and security groups info. Do you want to disable security group settings?",
# default="yes",
# )
disable_security_group = True
if disable_security_group:
instance_args.pop("SecurityGroups")
instance_args.pop("SecurityGroupIds")
if aws_config.get("placement", None) is not None:
instance_args["Placement"] = aws_config["placement"]
if not aws_config["spot"]:
instance_args["MinCount"] = 1
instance_args["MaxCount"] = 1
print("************************************************************")
print(instance_args["UserData"])
print("************************************************************")
if aws_config["spot"]:
instance_args["UserData"] = base64.b64encode(instance_args["UserData"].encode()).decode("utf-8")
spot_args = dict(
DryRun=dry,
InstanceCount=1,
LaunchSpecification=instance_args,
SpotPrice=aws_config["spot_price"],
# ClientToken=params_list[0]["exp_name"],
)
import pprint
pprint.pprint(spot_args)
if not dry:
response = ec2.request_spot_instances(**spot_args)
print(response)
spot_request_id = response['SpotInstanceRequests'][
0]['SpotInstanceRequestId']
for _ in range(10):
try:
ec2.create_tags(
Resources=[spot_request_id],
Tags=[
{'Key': 'Name', 'Value': params_list[0]["exp_name"]}
],
)
break
except botocore.exceptions.ClientError:
continue
else:
import pprint
pprint.pprint(instance_args)
ec2.create_instances(
DryRun=dry,
**instance_args
)
S3_CODE_PATH = None
def s3_sync_code(config, dry=False, added_project_directories=[]):
global S3_CODE_PATH
if S3_CODE_PATH is not None:
return S3_CODE_PATH
base = config.AWS_CODE_SYNC_S3_PATH
has_git = True
if config.FAST_CODE_SYNC:
try:
current_commit = subprocess.check_output(
["git", "rev-parse", "HEAD"]).strip().decode("utf-8")
except subprocess.CalledProcessError as _:
print("Warning: failed to execute git commands")
current_commit = None
file_name = str(timestamp) + "_" + hashlib.sha224(
subprocess.check_output(["pwd"]) + str(current_commit).encode() + str(timestamp).encode()
).hexdigest() + ".tar.gz"
file_path = "/tmp/" + file_name
tar_cmd = ["tar", "-zcvf", file_path, "-C", config.PROJECT_PATH]
for pattern in config.FAST_CODE_SYNC_IGNORES:
tar_cmd += ["--exclude", pattern]
tar_cmd += ["-h", "."]
for path in added_project_directories:
tar_cmd.append("-C")
tar_cmd.append(path)
tar_cmd += ["."]
remote_path = "%s/%s" % (base, file_name)
upload_cmd = ["aws", "s3", "cp", file_path, remote_path]
mujoco_key_cmd = [
"aws", "s3", "sync", config.MUJOCO_KEY_PATH, "{}/.mujoco/".format(base)]
print(" ".join(tar_cmd))
print(" ".join(upload_cmd))
print(" ".join(mujoco_key_cmd))
if not dry:
subprocess.check_call(tar_cmd)
subprocess.check_call(upload_cmd)
try:
subprocess.check_call(mujoco_key_cmd)
except Exception as e:
print(e)
S3_CODE_PATH = remote_path
return remote_path
else:
try:
current_commit = subprocess.check_output(
["git", "rev-parse", "HEAD"]).strip().decode("utf-8")
clean_state = len(
subprocess.check_output(["git", "status", "--porcelain"])) == 0
except subprocess.CalledProcessError as _:
print("Warning: failed to execute git commands")
has_git = False
dir_hash = base64.b64encode(subprocess.check_output(["pwd"])).decode("utf-8")
code_path = "%s_%s" % (
dir_hash,
(current_commit if clean_state else "%s_dirty_%s" % (current_commit, timestamp)) if
has_git else timestamp
)
full_path = "%s/%s" % (base, code_path)
cache_path = "%s/%s" % (base, dir_hash)
cache_cmds = ["aws", "s3", "cp", "--recursive"] + \
flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
[cache_path, full_path]
cmds = ["aws", "s3", "cp", "--recursive"] + \
flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
[".", full_path]
caching_cmds = ["aws", "s3", "cp", "--recursive"] + \
flatten(["--exclude", "%s" % pattern] for pattern in config.CODE_SYNC_IGNORES) + \
[full_path, cache_path]
mujoco_key_cmd = [
"aws", "s3", "sync", config.MUJOCO_KEY_PATH, "{}/.mujoco/".format(base)]
print(cache_cmds, cmds, caching_cmds, mujoco_key_cmd)
if not dry:
subprocess.check_call(cache_cmds)
subprocess.check_call(cmds)
subprocess.check_call(caching_cmds)
try:
subprocess.check_call(mujoco_key_cmd)
except Exception:
print('Unable to sync mujoco keys!')
S3_CODE_PATH = full_path
return full_path
def upload_file_to_s3(script_content):
import tempfile
import uuid
f = tempfile.NamedTemporaryFile(delete=False)
f.write(script_content.encode())
f.close()
remote_path = os.path.join(
config.AWS_CODE_SYNC_S3_PATH, "oversize_bash_scripts", str(uuid.uuid4()))
subprocess.check_call(["aws", "s3", "cp", f.name, remote_path])
os.unlink(f.name)
return remote_path
def to_lab_kube_pod(
params, docker_image, code_full_path,
python_command="python",
script='scripts/run_experiment.py',
is_gpu=False,
sync_s3_pkl=False,
periodic_sync=True,
periodic_sync_interval=15,
sync_all_data_node_to_s3=False,
terminate_machine=True
):
"""
:param params: The parameters for the experiment. If logging directory parameters are provided, we will create
docker volume mapping to make sure that the logging files are created at the correct locations
:param docker_image: docker image to run the command on
:param script: script command for running experiment
:return:
"""
log_dir = params.get("log_dir")
remote_log_dir = params.pop("remote_log_dir")
resources = params.pop("resources")
node_selector = params.pop("node_selector")
exp_prefix = params.pop("exp_prefix")
kube_env = [
{"name": k, "value": v}
for k, v in (params.pop("env", None) or dict()).items()
]
mkdir_p(log_dir)
pre_commands = list()
pre_commands.append('mkdir -p ~/.aws')
pre_commands.append('mkdir ~/.mujoco')
# fetch credentials from the kubernetes secret file
pre_commands.append('echo "[default]" >> ~/.aws/credentials')
pre_commands.append(
"echo \"aws_access_key_id = %s\" >> ~/.aws/credentials" % config.AWS_ACCESS_KEY)
pre_commands.append(
"echo \"aws_secret_access_key = %s\" >> ~/.aws/credentials" % config.AWS_ACCESS_SECRET)
s3_mujoco_key_path = config.AWS_CODE_SYNC_S3_PATH + '/.mujoco/'
pre_commands.append(
'aws s3 cp --recursive {} {}'.format(s3_mujoco_key_path, '~/.mujoco'))
if config.FAST_CODE_SYNC:
pre_commands.append('aws s3 cp %s /tmp/rllab_code.tar.gz' % code_full_path)
pre_commands.append('mkdir -p %s' % config.DOCKER_CODE_DIR)
pre_commands.append('tar -zxvf /tmp/rllab_code.tar.gz -C %s' % config.DOCKER_CODE_DIR)
else:
pre_commands.append('aws s3 cp --recursive %s %s' %
(code_full_path, config.DOCKER_CODE_DIR))
pre_commands.append('cd %s' % config.DOCKER_CODE_DIR)
pre_commands.append('mkdir -p %s' %
(log_dir))
if sync_all_data_node_to_s3:
print('Syncing all data from node to s3.')
if periodic_sync:
if sync_s3_pkl:
pre_commands.append("""
while /bin/true; do
aws s3 sync {log_dir} {remote_log_dir} --region {aws_region} --quiet
sleep {periodic_sync_interval}
done & echo sync initiated""".format(log_dir=log_dir, remote_log_dir=remote_log_dir,
aws_region=config.AWS_REGION_NAME,
periodic_sync_interval=periodic_sync_interval))
else:
pre_commands.append("""
while /bin/true; do
aws s3 sync {log_dir} {remote_log_dir} --region {aws_region} --quiet
sleep {periodic_sync_interval}
done & echo sync initiated""".format(log_dir=log_dir, remote_log_dir=remote_log_dir,
aws_region=config.AWS_REGION_NAME,
periodic_sync_interval=periodic_sync_interval))
else:
if periodic_sync:
if sync_s3_pkl:
pre_commands.append("""
while /bin/true; do
aws s3 sync --exclude '*' --include '*.csv' --include '*.json' --include '*.pkl' {log_dir} {remote_log_dir} --region {aws_region} --quiet
sleep {periodic_sync_interval}
done & echo sync initiated""".format(log_dir=log_dir, remote_log_dir=remote_log_dir,
aws_region=config.AWS_REGION_NAME,
periodic_sync_interval=periodic_sync_interval))
else:
pre_commands.append("""
while /bin/true; do
aws s3 sync --exclude '*' --include '*.csv' --include '*.json' {log_dir} {remote_log_dir} --region {aws_region} --quiet
sleep {periodic_sync_interval}
done & echo sync initiated""".format(log_dir=log_dir, remote_log_dir=remote_log_dir,
aws_region=config.AWS_REGION_NAME,
periodic_sync_interval=periodic_sync_interval))
# copy the file to s3 after execution
post_commands = list()
post_commands.append('aws s3 cp --recursive %s %s' %
(log_dir,
remote_log_dir))
if not terminate_machine:
post_commands.append('sleep infinity')
command_list = list()
if pre_commands is not None:
command_list.extend(pre_commands)
command_list.append("echo \"Running in docker\"")
command_list.append(
"%s 2>&1 | tee -a %s" % (
to_local_command(params, python_command=python_command, script=script),
"%s/stdouterr.log" % log_dir
)
)
if post_commands is not None:
command_list.extend(post_commands)
command = "; ".join(command_list)
pod_name = config.KUBE_PREFIX + params["exp_name"]
# underscore is not allowed in pod names
pod_name = pod_name.replace("_", "-")
print("Is gpu: ", is_gpu)
if not is_gpu:
return {
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": pod_name,
"labels": {
"owner": config.LABEL,
"expt": pod_name,
"exp_time": timestamp,
"exp_prefix": exp_prefix,
},
},
"spec": {
"containers": [
{
"name": "foo",
"image": docker_image,
"command": [
"/bin/bash",
"-c",
"-li", # to load conda env file
command,
],
"resources": resources,
"imagePullPolicy": "Always",
}
],
"restartPolicy": "Never",
"nodeSelector": node_selector,
"dnsPolicy": "Default",
}
}
return {
"apiVersion": "v1",
"kind": "Pod",
"metadata": {
"name": pod_name,
"labels": {
"owner": config.LABEL,
"expt": pod_name,
"exp_time": timestamp,
"exp_prefix": exp_prefix,
},
},
"spec": {
"containers": [
{
"name": "foo",
"image": docker_image,
"env": kube_env,
"command": [
"/bin/bash",
"-c",
"-li", # to load conda env file
command,
],
"resources": resources,
"imagePullPolicy": "Always",
# gpu specific
"volumeMounts": [
{
"name": "nvidia",
"mountPath": "/usr/local/nvidia",
"readOnly": True,
}
],
"securityContext": {
"privileged": True,
}
}
],
"volumes": [
{
"name": "nvidia",
"hostPath": {
"path": "/var/lib/docker/volumes/nvidia_driver_352.63/_data",
}
}
],
"restartPolicy": "Never",
"nodeSelector": node_selector,
"dnsPolicy": "Default",
}
}
def concretize(maybe_stub):
if isinstance(maybe_stub, StubMethodCall):
obj = concretize(maybe_stub.obj)
method = getattr(obj, maybe_stub.method_name)
args = concretize(maybe_stub.args)
kwargs = concretize(maybe_stub.kwargs)
return method(*args, **kwargs)
elif isinstance(maybe_stub, StubClass):
return maybe_stub.proxy_class
elif isinstance(maybe_stub, StubAttr):
obj = concretize(maybe_stub.obj)
attr_name = maybe_stub.attr_name
attr_val = getattr(obj, attr_name)
return concretize(attr_val)
elif isinstance(maybe_stub, StubObject):
if not hasattr(maybe_stub, "__stub_cache"):
args = concretize(maybe_stub.args)
kwargs = concretize(maybe_stub.kwargs)
try:
maybe_stub.__stub_cache = maybe_stub.proxy_class(
*args, **kwargs)
except Exception as e:
print(("Error while instantiating %s" % maybe_stub.proxy_class))
import traceback
traceback.print_exc()
ret = maybe_stub.__stub_cache
return ret
elif isinstance(maybe_stub, dict):
# make sure that there's no hidden caveat
ret = dict()
for k, v in maybe_stub.items():
ret[concretize(k)] = concretize(v)
return ret
elif isinstance(maybe_stub, (list, tuple)):
return maybe_stub.__class__(list(map(concretize, maybe_stub)))
else:
return maybe_stub
|
{
"content_hash": "9cf1b9081d43d3be7401175f5eb955ed",
"timestamp": "",
"source": "github",
"line_count": 1377,
"max_line_length": 174,
"avg_line_length": 39.75816993464052,
"alnum_prop": 0.5357005863334977,
"repo_name": "nosyndicate/pytorchrl",
"id": "fa7475a26487b08628c5873b3afa235761c88694",
"size": "54747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytorchrl/misc/instrument.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "311957"
}
],
"symlink_target": ""
}
|
'''
This script is a check for lookup at another check over ssh without
having an agent on the other side
'''
import os
import sys
import optparse
import base64
import subprocess
try:
import paramiko
except ImportError:
print "ERROR : this plugin needs the python-paramiko module. Please install it"
sys.exit(2)
# Ok try to load our directory to load the plugin utils.
my_dir = os.path.dirname(__file__)
sys.path.insert(0, my_dir)
try:
import schecks
except ImportError:
print "ERROR : this plugin needs the local schecks.py lib. Please install it"
sys.exit(2)
VERSION = "0.1"
def execute_check(client, check_path):
# Beware of the export!
stdin, stdout, stderr = client.exec_command(check_path)
lines = [line for line in stdout]
if len(lines) == 0:
return False
# Before return, close the client
client.close()
return lines[0].strip()
parser = optparse.OptionParser(
"%prog [options]", version="%prog " + VERSION)
parser.add_option('-H', '--hostname',
dest="hostname", help='Hostname to connect to')
parser.add_option('-p', '--port',
dest="port", type="int", default=22,
help='SSH port to connect to. Default : 22')
parser.add_option('-i', '--ssh-key',
dest="ssh_key_file", help='SSH key file to use. By default will take ~/.ssh/id_rsa.')
parser.add_option('-u', '--user',
dest="user", help='remote use to use. By default shinken.')
parser.add_option('-P', '--passphrase',
dest="passphrase", help='SSH key passphrase. By default will use void')
parser.add_option('-r', '--check_path',
dest="check_path", help='Path of the remote perfdata check to execute')
if __name__ == '__main__':
# Ok first job : parse args
opts, args = parser.parse_args()
if args:
parser.error("Does not accept any argument.")
hostname = opts.hostname
if not hostname:
print "Error : hostname parameter (-H) is mandatory"
sys.exit(2)
check_path = opts.check_path
if not check_path:
print "Error : check_path parameter (--check_path) is mandatory"
sys.exit(2)
port = opts.port
ssh_key_file = opts.ssh_key_file or os.path.expanduser('~/.ssh/id_rsa')
user = opts.user or 'shinken'
passphrase = opts.passphrase or ''
# Ok now connect, and try to get values for memory
client = schecks.connect(hostname, port, ssh_key_file, passphrase, user)
result = execute_check(client, check_path)
print result
sys.exit(0)
|
{
"content_hash": "b94b81ec0e2e37dd06c2adeeb37c8c90",
"timestamp": "",
"source": "github",
"line_count": 86,
"max_line_length": 103,
"avg_line_length": 29.813953488372093,
"alnum_prop": 0.6396255850234009,
"repo_name": "robinfourdeux/check-linux-by-ssh",
"id": "a1a38ca4cfac7f4a5c1fb271b1cbf41353f28d2b",
"size": "3759",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "check_ssh_proxy_check.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "126025"
}
],
"symlink_target": ""
}
|
__author__ = "orim"
import re
import pytest
import json
import inspect
import itertools
import sys
from attrdict import AttrDict
from pyfiglet import figlet_format
from pytest_scenario.exceptions import ImproperlyConfigured
from os.path import abspath
TEST_SCENARIOS_DIR = './sut/scenarios'
def pytest_addoption(parser):
parser.addoption("--scenario", action="store", dest='scenario_name', metavar='name',
help="states the scenario that should be tested")
parser.addoption("--repeat", action="store", default=1,
help="run all tests collected in a loop (default is 1 | infinite 0)")
def pytest_configure(config):
if config.pluginmanager.hasplugin('scenario') and config.option.scenario_name:
scenario_name = config.option.scenario_name
config._scenario = TestScenarioRunner(scenario_name)
config.pluginmanager.register(config._scenario, name=scenario_name)
else:
# Register "test_case" markers.
config_line = (
'test_case: test case description',
)
config.addinivalue_line('markers', config_line)
config._scenario = TestCaseRunner()
config.pluginmanager.register(config._scenario, name='test_case_runner')
def pytest_unconfigure(config):
scenario = getattr(config, '_scenario', None)
if scenario:
del config._scenario
config.pluginmanager.unregister(scenario)
class BaseRunner(object):
test_arg_fixture_binding_dict = {}
item_setup_dict = {}
def __init__(self):
self.tw = None
def pytest_generate_tests(self, metafunc):
raise NotImplementedError()
def pytest_collection_modifyitems(self, config, items):
for item in items:
try:
if not item.get_marker('skipif'):
fully_qualified_name = '.'.join([item.module.__name__, item.cls.__name__, item.name])
fixture_binding_dict = self.test_arg_fixture_binding_dict[fully_qualified_name]
for argname, fixture_config in fixture_binding_dict.items():
func, _, fixture_params = fixture_config
_, arg2fixturedefs = item.session._fixturemanager.getfixtureclosure([func], item)
if fixture_params:
item._request._pyfuncitem.callspec.params[func] = AttrDict(fixture_params)
item._fixtureinfo.name2fixturedefs[func] = arg2fixturedefs[func]
if argname in item.fixturenames:
self.item_setup_dict[item.nodeid] = item
except KeyError:
pass
def pytest_runtest_logstart(self, nodeid, location):
try:
item = self.item_setup_dict[nodeid]
except KeyError:
return
try:
fully_qualified_name = '.'.join([item.module.__name__, item.cls.__name__, item.name])
fixture_binding_dict = self.test_arg_fixture_binding_dict[fully_qualified_name]
for argname, fixture_config in fixture_binding_dict.items():
func, scope, _ = fixture_config
_, arg2fixturedefs = item.session._fixturemanager.getfixtureclosure([func], item)
fixture_def = arg2fixturedefs[func][0]
if scope and fixture_def.scope != scope:
fixture_def.finish()
fixture_def.scope = scope
try:
if not item._request:
item._initrequest()
item.funcargs[argname] = item._request.getfuncargvalue(func)
except AttributeError as e:
raise ImproperlyConfigured(', '.join([item.name, str(e)])) from e
except KeyError as e:
raise RuntimeError("unable to find a fixture function named {}".format(e))
def pytest_collection_finish(self, session):
self.tw = session.config.pluginmanager.getplugin('terminalreporter')._tw
def pytest_runtestloop(self, session):
try:
repeat = int(session.config.option.repeat)
except AttributeError:
return
assert isinstance(repeat, int), "Repeat must be an integer"
for i in itertools.count():
if i == repeat and i != 0:
break
if repeat == 0:
self.tw.write("repetition count: %d of %d\n" % (i+1, sys.maxsize), bold=True)
elif repeat != 1:
self.tw.write("repetition count: %d of %d\n" % (i+1, repeat), bold=True)
session.config.pluginmanager.getplugin("main").pytest_runtestloop(session)
return True
def pytest_runtest_teardown(self, item, nextitem):
self.tw.write('\n')
self.tw.sep("=", "{} {}".format(item.name, 'skipped' if item.get_marker('skipif') else 'finished'))
self.tw.write('\n')
class TestCaseRunner(BaseRunner):
id_counter = 0
def pytest_generate_tests(self, metafunc):
fully_qualified_name = '.'.join([metafunc.module.__name__, metafunc.cls.__name__, metafunc.function.__name__])
if hasattr(metafunc.function, "test_case"):
self.id_counter += 1
argnames = []
values = []
try:
test_params = metafunc.function.test_case.kwargs['test_params']
for argname, _ in test_params:
if argname in metafunc.fixturenames:
argnames.append(argname)
else:
raise ImproperlyConfigured(
"'{}' is not a valid argument for {}".format(argname, fully_qualified_name))
values = [argvalue for _, argvalue in test_params]
except KeyError:
pass
try:
fixture_binding = metafunc.function.test_case.kwargs['fixture_binding']
instance_id = '%s[%d]' % (fully_qualified_name, self.id_counter)
try:
for argname, fixture_config in fixture_binding:
try:
func = fixture_config['func']
scope = fixture_config.get('scope', None)
params = fixture_config.get('params', None)
except KeyError as e:
raise ImproperlyConfigured(
"missing '{}' key in while trying to bind a fixture to test param: "
"(test_param, {func='your fixture', scope='function \ class \ module \ session'})"
.format(e))
try:
self.test_arg_fixture_binding_dict[instance_id][argname] = (func, scope, params)
except KeyError:
self.test_arg_fixture_binding_dict[instance_id] = {}
self.test_arg_fixture_binding_dict[instance_id][argname] = (func, scope, params)
if argname in metafunc.fixturenames:
values.insert(0, func)
if argname not in argnames:
argnames.insert(0, argname)
else:
raise ImproperlyConfigured(
"'{}' is not a valid argument for {}".format(argname, fully_qualified_name))
except ValueError:
raise ImproperlyConfigured(
"\n{} - fixture_binding attribute should be a tuple containing at least 3 elements: "
"(argname, fixture, scope, params=None)".format(fully_qualified_name))
except KeyError:
pass
metafunc.parametrize(argnames, [values], ids=[str(self.id_counter)], scope="function")
class TestScenarioRunner(BaseRunner):
def __init__(self, scenario_name: str):
BaseRunner.__init__(self)
self._name = scenario_name
self.tests_dict, _ = self.generate_test_plan(scenario_name)
def generate_test_plan(self, scenario_name, parent_ref='', order=0):
tests_dict = {}
scenario_file_path = '{}/{}.json'.format(TEST_SCENARIOS_DIR, scenario_name)
try:
with open(scenario_file_path) as scenario_file:
scenario_config = json.load(scenario_file)
except FileNotFoundError:
raise RuntimeError("'{}' scenario is not defined (make sure {} is present)"
.format(scenario_name, abspath(scenario_file_path)))
id_counter = set()
for test_instance in scenario_config:
assert "id" in test_instance,\
"test case record in scenario '{}' is missing an id field.".format(scenario_name)
test_id = '-'.join([scenario_name, str(test_instance["id"])])
if parent_ref:
test_id = '.'.join([parent_ref, test_id])
assert test_id not in id_counter,\
"found a duplicate test id {} in scenario '{}'".format(test_instance["id"], scenario_name)
id_counter.add(test_id)
if test_instance.get('@ref', None):
sub_scenario_tests, order = self.generate_test_plan(test_instance['@ref'], test_id, order)
tests_dict.update(sub_scenario_tests)
else:
assert "test_name" in test_instance,\
"test case record in scenario '{}' is missing a test_name field.".format(scenario_name)
test_instance["id"] = test_id
order += 1
test_instance["order"] = order
tests_dict.update({'%s.%s.%s[%s]' % (test_instance["module_name"],
test_instance["class_name"],
test_instance["test_name"],
test_id): test_instance})
return tests_dict, order
def pytest_pycollect_makeitem(self, collector, name, obj):
tests_dict = self.tests_dict
if inspect.isfunction(obj) and name.startswith("test_") and isinstance(collector, pytest.Instance):
fully_qualified_name = '.'.join([obj.__module__, obj.__qualname__])
for test_id in tests_dict.keys():
if fully_qualified_name == re.sub('\[.*?\]$', '', test_id):
return
return []
def pytest_collection_modifyitems(self, config, items):
grouped_items = {}
for item in items:
fully_qualified_name = '.'.join([item.module.__name__, item.cls.__name__, item.name])
try:
test = self.tests_dict[fully_qualified_name]
except KeyError:
items.remove(item)
continue
try:
if test['skip']:
item.add_marker(pytest.mark.skipif)
else:
item.keywords['skipif'] = None
item.keywords['skip'] = None
if test['xfail']:
item.add_marker(pytest.mark.xfail)
else:
item.keywords['xfail'] = None
grouped_items.setdefault(test['order'], []).append(item)
except KeyError as e:
raise ImproperlyConfigured('missing {} field in {} configuration'.format(e, item.name))
if grouped_items:
items[:] = self.order_items(grouped_items)
BaseRunner.pytest_collection_modifyitems(self, config, items)
def order_items(self, grouped_items):
# Algorithm provided by https://github.com/ftobia
if grouped_items:
unordered_items = grouped_items.pop(None, None)
sorted_items = []
prev_key = 0
for key, ordered_items in grouped_items.items():
if unordered_items and key < 0 <= prev_key:
sorted_items.extend(unordered_items)
unordered_items = None
prev_key = key
sorted_items.extend(ordered_items)
if unordered_items:
sorted_items.extend(unordered_items)
return sorted_items
def pytest_generate_tests(self, metafunc):
fully_qualified_name = '.'.join([metafunc.module.__name__, metafunc.cls.__name__, metafunc.function.__name__])
test_instances = [test for test in self.tests_dict.items()
if fully_qualified_name == re.sub('\[.*?\]$', '', test[0])]
idlist = []
argnames = []
argvalues = []
try:
params = test_instances[0][1]['test_params'].items()
except KeyError:
try:
source_el = 'scenario %s' % re.search('.*?\[(.*?)\]', test_instances[0][0]).group(1)
except AttributeError:
# should never happen
source_el = test_instances[0][0]
raise ImproperlyConfigured('missing \'test_params\' field in {} configuration'.format(source_el))
for argname, _ in params:
if argname in metafunc.fixturenames:
argnames.append(argname)
else:
raise ImproperlyConfigured("'{}' is not a valid argument for {}".format(argname, fully_qualified_name))
for instance_id, test_config in test_instances:
try:
idlist.append(test_config['id'])
values = [x[1] for x in test_config['test_params'].items()]
fixture_binding = test_config['fixture_binding']
for argname, fixture_config in fixture_binding.items():
try:
func = fixture_config['func']
scope = fixture_config.get('scope', None)
params = fixture_config.get('params', None)
except KeyError as e:
raise ImproperlyConfigured(
"missing {} key in {} fixture binding configuration".format(e, argname))
try:
self.test_arg_fixture_binding_dict[instance_id][argname] = (func, scope, params)
except KeyError:
self.test_arg_fixture_binding_dict[instance_id] = {}
self.test_arg_fixture_binding_dict[instance_id][argname] = (func, scope, params)
if argname in metafunc.fixturenames:
values.insert(0, func)
if argname not in argnames:
argnames.insert(0, argname)
else:
raise ImproperlyConfigured(
"'{}' is not a valid argument for {}".format(argname, fully_qualified_name))
argvalues.append(values)
except KeyError as e:
raise ImproperlyConfigured(
'missing {} field in test {} configuration'.format(e, instance_id))
if idlist:
metafunc.parametrize(argnames, argvalues, ids=idlist, scope="function")
def pytest_collection_finish(self, session):
self.tw = session.config.pluginmanager.getplugin('terminalreporter')._tw
self.tw.write("selected scenario: \n", bold=True)
self.tw.write(figlet_format(self._name + '\n'), bold=True, blink=True)
|
{
"content_hash": "77ce0b44f9d8fc66ae5849ae5522cd0a",
"timestamp": "",
"source": "github",
"line_count": 328,
"max_line_length": 119,
"avg_line_length": 47.35670731707317,
"alnum_prop": 0.5406553788707912,
"repo_name": "OriMenashe/pytest-scenario",
"id": "f40361991a59b988770ce3d534259435a1694dac",
"size": "15533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytest_scenario/plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "19550"
}
],
"symlink_target": ""
}
|
import time
from datetime import timedelta
import logging
from traceback import format_exc
from django.utils import timezone
from django.db.utils import ProgrammingError
from django.core.cache import cache
from .models import RepeatingTask
from .utils import redis_connection
logger = logging.getLogger('cq')
def perform_scheduling():
logger.debug('cq-scheduler: performing scheduling started')
with cache.lock('cq:scheduler:lock', timeout=10):
logger.debug('cq-scheduler: checking for scheduled tasks')
now = timezone.now()
try:
rtasks = RepeatingTask.objects.filter(next_run__lte=now)
logger.info('cq-scheduler: have {} repeating task(s) ready'.format(rtasks.count()))
for rt in rtasks:
logger.info('cq-scheduler: submitting {}'.format(rt))
try:
rt.submit()
except:
# Don't terminate if a submit fails.
logger.error(format_exc())
except ProgrammingError:
logger.warning('CQ scheduler not running, DB is out of date.')
logger.debug('cq-scheduler: performing scheduling finished')
def scheduler_internal():
logger.debug('cq-scheduler: determining winning scheduler')
am_scheduler = False
with redis_connection() as conn:
if conn.setnx('cq:scheduler', 'dummy'):
conn.expire('cq:scheduler', 30)
am_scheduler = True
if am_scheduler:
logger.info('cq-scheduler: winner')
perform_scheduling()
else:
logger.debug('cq-scheduler: loser')
now = timezone.now()
delay = ((now + timedelta(minutes=1)).replace(second=0, microsecond=0) - now).total_seconds()
logger.debug('cq-scheduler: waiting {} seconds for next schedule attempt'.format(delay))
time.sleep(delay)
def scheduler(*args, **kwargs):
logger.info('cq-scheduler: Scheduler thread active.')
while 1:
try:
scheduler_internal()
except Exception as ex:
logger.error(format_exc())
time.sleep(0.5)
|
{
"content_hash": "e9267f28a43ea4c8c51bbd3478c32e94",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 97,
"avg_line_length": 33.88709677419355,
"alnum_prop": 0.6358876725368872,
"repo_name": "furious-luke/django-cq",
"id": "1a38c3d9858de002d98c3cc80dc2056a867eafe0",
"size": "2101",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cq/scheduler.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "84931"
}
],
"symlink_target": ""
}
|
import os
import numpy as np
from tqdm import tqdm
import cv2
import glob
from utils import *
from constants import *
from models.model_bce import ModelBCE
def test(path_to_images, path_output_maps, model_to_test=None):
list_img_files = [k.split('/')[-1].split('.')[0] for k in glob.glob(os.path.join(path_to_images, '*'))]
# Load Data
list_img_files.sort()
for curr_file in tqdm(list_img_files, ncols=20):
print os.path.join(path_to_images, curr_file + '.jpg')
img = cv2.cvtColor(cv2.imread(os.path.join(path_to_images, curr_file + '.jpg'), cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB)
predict(model=model_to_test, image_stimuli=img, name=curr_file, path_output_maps=path_output_maps)
def main():
# Create network
model = ModelBCE(INPUT_SIZE[0], INPUT_SIZE[1], batch_size=8)
# Here need to specify the epoch of model sanpshot
load_weights(model.net['output'], path='gen_', epochtoload=90)
# Here need to specify the path to images and output path
test(path_to_images='../images/', path_output_maps='../saliency/', model_to_test=model)
if __name__ == "__main__":
main()
|
{
"content_hash": "95f02390541a6b87f12ee80b952d73c8",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 125,
"avg_line_length": 37.86666666666667,
"alnum_prop": 0.6716549295774648,
"repo_name": "imatge-upc/saliency-salgan-2017",
"id": "0701fc60672d2e35027442de2fa28eb4d29e1f93",
"size": "1136",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/03-predict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52735"
}
],
"symlink_target": ""
}
|
"""Support for Google Domains."""
import asyncio
from datetime import timedelta
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.const import CONF_DOMAIN, CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType
_LOGGER = logging.getLogger(__name__)
DOMAIN = "google_domains"
INTERVAL = timedelta(minutes=5)
DEFAULT_TIMEOUT = 10
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Initialize the Google Domains component."""
domain = config[DOMAIN].get(CONF_DOMAIN)
user = config[DOMAIN].get(CONF_USERNAME)
password = config[DOMAIN].get(CONF_PASSWORD)
timeout = config[DOMAIN].get(CONF_TIMEOUT)
session = async_get_clientsession(hass)
result = await _update_google_domains(
hass, session, domain, user, password, timeout
)
if not result:
return False
async def update_domain_interval(now):
"""Update the Google Domains entry."""
await _update_google_domains(hass, session, domain, user, password, timeout)
async_track_time_interval(hass, update_domain_interval, INTERVAL)
return True
async def _update_google_domains(hass, session, domain, user, password, timeout):
"""Update Google Domains."""
url = f"https://{user}:{password}@domains.google.com/nic/update"
params = {"hostname": domain}
try:
async with async_timeout.timeout(timeout):
resp = await session.get(url, params=params)
body = await resp.text()
if body.startswith("good") or body.startswith("nochg"):
return True
_LOGGER.warning("Updating Google Domains failed: %s => %s", domain, body)
except aiohttp.ClientError:
_LOGGER.warning("Can't connect to Google Domains API")
except asyncio.TimeoutError:
_LOGGER.warning("Timeout from Google Domains API for domain: %s", domain)
return False
|
{
"content_hash": "6997a392d40b668f2eb3804441633a42",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 87,
"avg_line_length": 29.896551724137932,
"alnum_prop": 0.6770472895040369,
"repo_name": "toddeye/home-assistant",
"id": "c7f7e632bd66b15fc6f352ffc40c7ed35bcca2f7",
"size": "2601",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "homeassistant/components/google_domains/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3005"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "47414832"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
"""
Module which implements propagation modelling functions.
Created on Sun Feb 26 19:56:10 2017
@author: Ashiv Dhondea
Edits:
26/02/17: created file and added the function fnCalculate_LinkTime
26/02/17: created the function fnCalculate_DownlinkTime_Iter
02/03/17: included the module MathsFn which is called by fnCalculate_DownlinkTime_Iter
02/03/17: added the function fnCalculate_UplinkTime_Iter
24/03/17: fixed the function fnCalculate_UplinkTime_Iter
04/04/17: created the function fnCalculate_RangeRate
19/04/17: fixed a typo in fnCalculate_RangeRate
19/04/17: created the function fnCalculate_Bistatic_Range
19/04/17: Created two versions of fnCalculate_RangeRate: one for bistatic range rate and one for monostatic range rate
Reference:
1. Satellite Orbits: Models, Methods, Applications. Montenbruck, Gill. 2000. Section 6.2.2
"""
import numpy as np
import MathsFunctions as MathFn
# --------------------------------------------------------------------------- #
def fnCalculate_LinkTime(target_pos,radar_pos,speed_light):
"""
Calculates the signal propagation time over a link.
target_pos = position of target
radar_pos = position of radar
speed_light = speed of light in the medium of interest
Called by fnCalculate_DownlinkTime_Iter
Created: 25 February 2017
"""
dvec = target_pos - radar_pos;
tau = (1./speed_light)*np.linalg.norm(dvec);
return tau # validated. lighttime02.py
def fnCalculate_DownlinkTime_Iter(target_pos,radar_pos,timevec,index,speed_light,errTol):
"""
Iterative procedure to find the downlink light-time for a particular signal
received at the ground station.
target_pos = position vectors of the target at all time stamps.
radar_pos = position vectors of the ground station at all time stamps.
timevec = vector of time variable.
index = time index for which the downlink time is to be found.
speed_light = speed of light in the medium of interest.
errTol = error tolerance is the evaluated light-time.
tau_i = light-time in seconds for the downlink.
time_idx = time index at which light was emitted
time_el = light-time in seconds for the downlink according to our timevec
Calls fnCalculate_LinkTime
Created: 26 February 2017
"""
tau_i = 0.;
for i in range(len(timevec)):
tau_0 = tau_i;
# Find position of the target and the station at the moment of interest.
tgt_pos = target_pos[:,index - i];
site_pos = radar_pos[:,index];
# Evaluate the downlink time.
tau_i = fnCalculate_LinkTime(tgt_pos,site_pos,speed_light);
if abs(tau_i - tau_0) < errTol: # if within the tolerance limits, stop the iteration.
#~ print 'time of transmission of pulse'
#~ print timevec[index] - tau_i
time_el,time_idx = MathFn.find_nearest(timevec,timevec[index] - tau_i)
#print time_idx
#~ print 'light-time approx in [s] '
#~ print tau_i
break
return tau_i,time_el,time_idx # validated. 02.03.17 in lighttime03.py
def fnCalculate_UplinkTime_Iter(target_pos,radar_pos,timevec,index,speed_light,errTol,index_d,tau_d):
"""
Iterative procedure to find the uplink light-time for a particular signal
received at the ground station.
target_pos = position vectors of the target at all time stamps.
radar_pos = position vectors of the ground station at all time stamps.
timevec = vector of time variable.
index = time index for which the uplink time is to be found.
speed_light = speed of light in the medium of interest.
errTol = error tolerance is the evaluated light-time.
index_d = the downlink time index
tau_d = the downlink time
tau_i = light-time in seconds for the uplink.
Calls fnCalculate_LinkTime
Created: 01 March 2017
Edited:
02.03.17: fixed a mistake in the expression for the time index of the uplink time.
02.03.17: fixed initialization of tau_i
24/03/17: fixed the code. validation in testlighttime_01.py
"""
tau_i = tau_d;
for i in range(0,len(timevec)-index_d+index):
# print 'i'
# print i
tau_0 = tau_i;
# print 'tau_0'
# print tau_0
# Find position of the target and the station at the moment of interest.
tgt_pos = target_pos[:,index ];
site_pos = radar_pos[:,index - i]; # 24/03/17: see proof of this being right in testlighttime_01.py
# print index
# print index_d
# Evaluate the uplink time.
tau_i = fnCalculate_LinkTime(tgt_pos,site_pos,speed_light);
# print 'tau_i'
# print tau_i
if abs(tau_i - tau_0) < errTol: # if within the tolerance limits, stop the iteration.
# print 'uplink time found'
# print 'time of transmission of pulse'
# print timevec[index] - tau_i
time_el,time_idx = MathFn.find_nearest(timevec,timevec[index] - tau_d - tau_i) #24/03/17
# print time_idx
# print 'light-time approx in [s] '
# print tau_i
break
return tau_i,time_idx # the uplink time and the time index for it.
# --------------------------------------------------------------------------------------------------------------- #
def fnCalculate_Bistatic_RangeRate(speed_light,tau_u1,tau_d1,tau_u2,tau_d2,tc):
"""
Calculate the average range rate. eqn 6.37 in Montenbruck 2000.
tc = length of integration interval, i.e. length of CPI
Created: 04/04/17
"""
range_rate = (speed_light/tc)*(tau_u2+tau_d2-tau_u1-tau_d1); # removed 0.5 factor. 19.04.17
return range_rate
def fnCalculate_Bistatic_Range(speed_light,tau_d,tau_u):
"""
Calculate the bistatic range.
Created: 19/04/17
"""
tau = np.add(tau_d,tau_u);
actual_range = speed_light*tau;
return actual_range
def fnCalculate_Monostatic_RangeRate(speed_light,tau_u1,tau_d1,tau_u2,tau_d2,tc):
"""
Calculate the average range rate. eqn 6.37 in Montenbruck 2000.
tc = length of integration interval, i.e. length of CPI
Created: 19/04/17
"""
range_rate = (0.5*speed_light/tc)*(tau_u2+tau_d2-tau_u1-tau_d1);
return range_rate
def fnCalculate_Monostatic_Range(speed_light,tau_d,tau_u):
"""
Calculate the monostatic range.
Created: 19/04/17
"""
tau = np.add(tau_d,tau_u);
actual_range = 0.5*speed_light*tau;
return actual_range
|
{
"content_hash": "5cbe0ee191dd7c9abd71650bc62417eb",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 118,
"avg_line_length": 40.01204819277108,
"alnum_prop": 0.6341463414634146,
"repo_name": "AshivDhondea/SORADSIM",
"id": "8184a89f52e08c5f4ede506af7276e7adf1fec5e",
"size": "6667",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "modules/PropagationModels.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1189967"
},
{
"name": "Python",
"bytes": "484131"
}
],
"symlink_target": ""
}
|
"""Apache Configuration based off of Augeas Configurator."""
import logging
import os
import re
import shutil
import socket
import subprocess
import zope.interface
from acme import challenges
from letsencrypt import achallenges
from letsencrypt import constants as core_constants
from letsencrypt import errors
from letsencrypt import interfaces
from letsencrypt import le_util
from letsencrypt.plugins import common
from letsencrypt_apache import augeas_configurator
from letsencrypt_apache import constants
from letsencrypt_apache import display_ops
from letsencrypt_apache import dvsni
from letsencrypt_apache import obj
from letsencrypt_apache import parser
logger = logging.getLogger(__name__)
# TODO: Augeas sections ie. <VirtualHost>, <IfModule> beginning and closing
# tags need to be the same case, otherwise Augeas doesn't recognize them.
# This is not able to be completely remedied by regular expressions because
# Augeas views <VirtualHost> </Virtualhost> as an error. This will just
# require another check_parsing_errors() after all files are included...
# (after a find_directive search is executed currently). It can be a one
# time check however because all of LE's transactions will ensure
# only properly formed sections are added.
# Note: This protocol works for filenames with spaces in it, the sites are
# properly set up and directives are changed appropriately, but Apache won't
# recognize names in sites-enabled that have spaces. These are not added to the
# Apache configuration. It may be wise to warn the user if they are trying
# to use vhost filenames that contain spaces and offer to change ' ' to '_'
# Note: FILEPATHS and changes to files are transactional. They are copied
# over before the updates are made to the existing files. NEW_FILES is
# transactional due to the use of register_file_creation()
class ApacheConfigurator(augeas_configurator.AugeasConfigurator):
# pylint: disable=too-many-instance-attributes,too-many-public-methods
"""Apache configurator.
State of Configurator: This code has been tested under Ubuntu 12.04
Apache 2.2 and this code works for Ubuntu 14.04 Apache 2.4. Further
notes below.
This class was originally developed for Apache 2.2 and I have been slowly
transitioning the codebase to work with all of the 2.4 features.
I have implemented most of the changes... the missing ones are
mod_ssl.c vs ssl_mod, and I need to account for configuration variables.
This class can adequately configure most typical configurations but
is not ready to handle very complex configurations.
.. todo:: Add support for config file variables Define rootDir /var/www/
.. todo:: Add proper support for module configuration
The API of this class will change in the coming weeks as the exact
needs of clients are clarified with the new and developing protocol.
:ivar config: Configuration.
:type config: :class:`~letsencrypt.interfaces.IConfig`
:ivar parser: Handles low level parsing
:type parser: :class:`~letsencrypt_apache.parser`
:ivar tup version: version of Apache
:ivar list vhosts: All vhosts found in the configuration
(:class:`list` of :class:`~letsencrypt_apache.obj.VirtualHost`)
:ivar dict assoc: Mapping between domains and vhosts
"""
zope.interface.implements(interfaces.IAuthenticator, interfaces.IInstaller)
zope.interface.classProvides(interfaces.IPluginFactory)
description = "Apache Web Server - Alpha"
@classmethod
def add_parser_arguments(cls, add):
add("server-root", default=constants.CLI_DEFAULTS["server_root"],
help="Apache server root directory.")
add("ctl", default=constants.CLI_DEFAULTS["ctl"],
help="Path to the 'apache2ctl' binary, used for 'configtest' and "
"retrieving Apache2 version number.")
add("enmod", default=constants.CLI_DEFAULTS["enmod"],
help="Path to the Apache 'a2enmod' binary.")
add("init-script", default=constants.CLI_DEFAULTS["init_script"],
help="Path to the Apache init script (used for server "
"reload/restart).")
add("le-vhost-ext", default=constants.CLI_DEFAULTS["le_vhost_ext"],
help="SSL vhost configuration extension.")
def __init__(self, *args, **kwargs):
"""Initialize an Apache Configurator.
:param tup version: version of Apache as a tuple (2, 4, 7)
(used mostly for unittesting)
"""
version = kwargs.pop("version", None)
super(ApacheConfigurator, self).__init__(*args, **kwargs)
# Verify that all directories and files exist with proper permissions
if os.geteuid() == 0:
self.verify_setup()
# Add name_server association dict
self.assoc = dict()
# Add number of outstanding challenges
self._chall_out = 0
# These will be set in the prepare function
self.parser = None
self.version = version
self.vhosts = None
self._enhance_func = {"redirect": self._enable_redirect}
@property
def mod_ssl_conf(self):
"""Full absolute path to SSL configuration file."""
return os.path.join(self.config.config_dir, constants.MOD_SSL_CONF_DEST)
def prepare(self):
"""Prepare the authenticator/installer."""
self.parser = parser.ApacheParser(
self.aug, self.conf("server-root"), self.mod_ssl_conf)
# Check for errors in parsing files with Augeas
self.check_parsing_errors("httpd.aug")
# Set Version
if self.version is None:
self.version = self.get_version()
# Get all of the available vhosts
self.vhosts = self.get_virtual_hosts()
# Enable mod_ssl if it isn't already enabled
# This is Let's Encrypt... we enable mod_ssl on initialization :)
# TODO: attempt to make the check faster... this enable should
# be asynchronous as it shouldn't be that time sensitive
# on initialization
self._prepare_server_https()
temp_install(self.mod_ssl_conf)
def deploy_cert(self, domain, cert_path, key_path, chain_path=None):
"""Deploys certificate to specified virtual host.
Currently tries to find the last directives to deploy the cert in
the VHost associated with the given domain. If it can't find the
directives, it searches the "included" confs. The function verifies that
it has located the three directives and finally modifies them to point
to the correct destination. After the certificate is installed, the
VirtualHost is enabled if it isn't already.
.. todo:: Make sure last directive is changed
.. todo:: Might be nice to remove chain directive if none exists
This shouldn't happen within letsencrypt though
"""
vhost = self.choose_vhost(domain)
path = {}
path["cert_path"] = self.parser.find_dir(parser.case_i(
"SSLCertificateFile"), None, vhost.path)
path["cert_key"] = self.parser.find_dir(parser.case_i(
"SSLCertificateKeyFile"), None, vhost.path)
# Only include if a certificate chain is specified
if chain_path is not None:
path["chain_path"] = self.parser.find_dir(
parser.case_i("SSLCertificateChainFile"), None, vhost.path)
if not path["cert_path"] or not path["cert_key"]:
# Throw some can't find all of the directives error"
logger.warn(
"Cannot find a cert or key directive in %s. "
"VirtualHost was not modified", vhost.path)
# Presumably break here so that the virtualhost is not modified
return False
logger.info("Deploying Certificate to VirtualHost %s", vhost.filep)
self.aug.set(path["cert_path"][0], cert_path)
self.aug.set(path["cert_key"][0], key_path)
if chain_path is not None:
if not path["chain_path"]:
self.parser.add_dir(
vhost.path, "SSLCertificateChainFile", chain_path)
else:
self.aug.set(path["chain_path"][0], chain_path)
self.save_notes += ("Changed vhost at %s with addresses of %s\n" %
(vhost.filep,
", ".join(str(addr) for addr in vhost.addrs)))
self.save_notes += "\tSSLCertificateFile %s\n" % cert_path
self.save_notes += "\tSSLCertificateKeyFile %s\n" % key_path
if chain_path is not None:
self.save_notes += "\tSSLCertificateChainFile %s\n" % chain_path
# Make sure vhost is enabled
if not vhost.enabled:
self.enable_site(vhost)
def choose_vhost(self, target_name):
"""Chooses a virtual host based on the given domain name.
If there is no clear virtual host to be selected, the user is prompted
with all available choices.
:param str target_name: domain name
:returns: ssl vhost associated with name
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.PluginError: If no vhost is available
"""
# Allows for domain names to be associated with a virtual host
# Client isn't using create_dn_server_assoc(self, dn, vh) yet
if target_name in self.assoc:
return self.assoc[target_name]
# Check for servernames/aliases for ssl hosts
for vhost in self.vhosts:
if vhost.ssl and target_name in vhost.names:
self.assoc[target_name] = vhost
return vhost
# Checking for domain name in vhost address
# This technique is not recommended by Apache but is technically valid
target_addr = common.Addr((target_name, "443"))
for vhost in self.vhosts:
if target_addr in vhost.addrs:
self.assoc[target_name] = vhost
return vhost
# Check for non ssl vhosts with servernames/aliases == "name"
for vhost in self.vhosts:
if not vhost.ssl and target_name in vhost.names:
vhost = self.make_vhost_ssl(vhost)
self.assoc[target_name] = vhost
return vhost
vhost = display_ops.select_vhost(target_name, self.vhosts)
if vhost is not None:
self.assoc[target_name] = vhost
else:
logger.error(
"No vhost exists with servername or alias of: %s. "
"No vhost was selected. Please specify servernames "
"in the Apache config", target_name)
raise errors.PluginError("No vhost selected")
# TODO: Ask the user if they would like to add ServerName/Alias to VH
return vhost
# # No matches, search for the default
# for vhost in self.vhosts:
# if "_default_:443" in vhost.addrs:
# return vhost
def create_dn_server_assoc(self, domain, vhost):
"""Create an association between a domain name and virtual host.
Helps to choose an appropriate vhost
:param str domain: domain name to associate
:param vhost: virtual host to associate with domain
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
self.assoc[domain] = vhost
def get_all_names(self):
"""Returns all names found in the Apache Configuration.
:returns: All ServerNames, ServerAliases, and reverse DNS entries for
virtual host addresses
:rtype: set
"""
all_names = set()
# Kept in same function to avoid multiple compilations of the regex
priv_ip_regex = (r"(^127\.0\.0\.1)|(^10\.)|(^172\.1[6-9]\.)|"
r"(^172\.2[0-9]\.)|(^172\.3[0-1]\.)|(^192\.168\.)")
private_ips = re.compile(priv_ip_regex)
for vhost in self.vhosts:
all_names.update(vhost.names)
for addr in vhost.addrs:
# If it isn't a private IP, do a reverse DNS lookup
if not private_ips.match(addr.get_addr()):
try:
socket.inet_aton(addr.get_addr())
all_names.add(socket.gethostbyaddr(addr.get_addr())[0])
except (socket.error, socket.herror, socket.timeout):
continue
return all_names
def _add_servernames(self, host):
"""Helper function for get_virtual_hosts().
:param host: In progress vhost whose names will be added
:type host: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
name_match = self.aug.match(("%s//*[self::directive=~regexp('%s')] | "
"%s//*[self::directive=~regexp('%s')]" %
(host.path,
parser.case_i("ServerName"),
host.path,
parser.case_i("ServerAlias"))))
for name in name_match:
args = self.aug.match(name + "/*")
for arg in args:
host.add_name(self.aug.get(arg))
def _create_vhost(self, path):
"""Used by get_virtual_hosts to create vhost objects
:param str path: Augeas path to virtual host
:returns: newly created vhost
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
"""
addrs = set()
args = self.aug.match(path + "/arg")
for arg in args:
addrs.add(common.Addr.fromstring(self.aug.get(arg)))
is_ssl = False
if self.parser.find_dir(
parser.case_i("SSLEngine"), parser.case_i("on"), path):
is_ssl = True
filename = get_file_path(path)
is_enabled = self.is_site_enabled(filename)
vhost = obj.VirtualHost(filename, path, addrs, is_ssl, is_enabled)
self._add_servernames(vhost)
return vhost
# TODO: make "sites-available" a configurable directory
def get_virtual_hosts(self):
"""Returns list of virtual hosts found in the Apache configuration.
:returns: List of :class:`~letsencrypt_apache.obj.VirtualHost`
objects found in configuration
:rtype: list
"""
# Search sites-available, httpd.conf for possible virtual hosts
paths = self.aug.match(
("/files%s/sites-available//*[label()=~regexp('%s')]" %
(self.parser.root, parser.case_i("VirtualHost"))))
vhs = []
for path in paths:
vhs.append(self._create_vhost(path))
return vhs
def is_name_vhost(self, target_addr):
r"""Returns if vhost is a name based vhost
NameVirtualHost was deprecated in Apache 2.4 as all VirtualHosts are
now NameVirtualHosts. If version is earlier than 2.4, check if addr
has a NameVirtualHost directive in the Apache config
:param str target_addr: vhost address ie. \*:443
:returns: Success
:rtype: bool
"""
# Mixed and matched wildcard NameVirtualHost with VirtualHost
# behavior is undefined. Make sure that an exact match exists
# search for NameVirtualHost directive for ip_addr
# note ip_addr can be FQDN although Apache does not recommend it
return (self.version >= (2, 4) or
self.parser.find_dir(
parser.case_i("NameVirtualHost"),
parser.case_i(str(target_addr))))
def add_name_vhost(self, addr):
"""Adds NameVirtualHost directive for given address.
:param str addr: Address that will be added as NameVirtualHost directive
"""
path = self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(
self.parser.loc["name"]), "NameVirtualHost", str(addr))
self.save_notes += "Setting %s to be NameBasedVirtualHost\n" % addr
self.save_notes += "\tDirective added to %s\n" % path
def _prepare_server_https(self):
"""Prepare the server for HTTPS.
Make sure that the ssl_module is loaded and that the server
is appropriately listening on port 443.
"""
if not self.mod_loaded("ssl_module"):
logger.info("Loading mod_ssl into Apache Server")
self.enable_mod("ssl")
# Check for Listen 443
# Note: This could be made to also look for ip:443 combo
# TODO: Need to search only open directives and IfMod mod_ssl.c
if len(self.parser.find_dir(parser.case_i("Listen"), "443")) == 0:
logger.debug("No Listen 443 directive found. Setting the "
"Apache Server to Listen on port 443")
path = self.parser.add_dir_to_ifmodssl(
parser.get_aug_path(self.parser.loc["listen"]), "Listen", "443")
self.save_notes += "Added Listen 443 directive to %s\n" % path
def make_server_sni_ready(self, vhost, default_addr="*:443"):
"""Checks to see if the server is ready for SNI challenges.
:param vhost: VirtualHost to check SNI compatibility
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:param str default_addr: TODO - investigate function further
"""
if self.version >= (2, 4):
return
# Check for NameVirtualHost
# First see if any of the vhost addresses is a _default_ addr
for addr in vhost.addrs:
if addr.get_addr() == "_default_":
if not self.is_name_vhost(default_addr):
logger.debug("Setting all VirtualHosts on %s to be "
"name based vhosts", default_addr)
self.add_name_vhost(default_addr)
# No default addresses... so set each one individually
for addr in vhost.addrs:
if not self.is_name_vhost(addr):
logger.debug("Setting VirtualHost at %s to be a name "
"based virtual host", addr)
self.add_name_vhost(addr)
def make_vhost_ssl(self, nonssl_vhost): # pylint: disable=too-many-locals
"""Makes an ssl_vhost version of a nonssl_vhost.
Duplicates vhost and adds default ssl options
New vhost will reside as (nonssl_vhost.path) +
``letsencrypt_apache.constants.CLI_DEFAULTS["le_vhost_ext"]``
.. note:: This function saves the configuration
:param nonssl_vhost: Valid VH that doesn't have SSLEngine on
:type nonssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: SSL vhost
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost`
:raises .errors.PluginError: If more than one virtual host is in
the file or if plugin is unable to write/read vhost files.
"""
avail_fp = nonssl_vhost.filep
# Get filepath of new ssl_vhost
if avail_fp.endswith(".conf"):
ssl_fp = avail_fp[:-(len(".conf"))] + self.conf("le_vhost_ext")
else:
ssl_fp = avail_fp + self.conf("le_vhost_ext")
# First register the creation so that it is properly removed if
# configuration is rolled back
self.reverter.register_file_creation(False, ssl_fp)
try:
with open(avail_fp, "r") as orig_file:
with open(ssl_fp, "w") as new_file:
new_file.write("<IfModule mod_ssl.c>\n")
for line in orig_file:
new_file.write(line)
new_file.write("</IfModule>\n")
except IOError:
logger.fatal("Error writing/reading to file in make_vhost_ssl")
raise errors.PluginError("Unable to write/read in make_vhost_ssl")
self.aug.load()
ssl_addrs = set()
# change address to address:443
addr_match = "/files%s//* [label()=~regexp('%s')]/arg"
ssl_addr_p = self.aug.match(
addr_match % (ssl_fp, parser.case_i("VirtualHost")))
for addr in ssl_addr_p:
old_addr = common.Addr.fromstring(
str(self.aug.get(addr)))
ssl_addr = old_addr.get_addr_obj("443")
self.aug.set(addr, str(ssl_addr))
ssl_addrs.add(ssl_addr)
# Add directives
vh_p = self.aug.match("/files%s//* [label()=~regexp('%s')]" %
(ssl_fp, parser.case_i("VirtualHost")))
if len(vh_p) != 1:
logger.error("Error: should only be one vhost in %s", avail_fp)
raise errors.PluginError("Only one vhost per file is allowed")
self.parser.add_dir(vh_p[0], "SSLCertificateFile",
"/etc/ssl/certs/ssl-cert-snakeoil.pem")
self.parser.add_dir(vh_p[0], "SSLCertificateKeyFile",
"/etc/ssl/private/ssl-cert-snakeoil.key")
self.parser.add_dir(vh_p[0], "Include", self.parser.loc["ssl_options"])
# Log actions and create save notes
logger.info("Created an SSL vhost at %s", ssl_fp)
self.save_notes += "Created ssl vhost at %s\n" % ssl_fp
self.save()
# We know the length is one because of the assertion above
ssl_vhost = self._create_vhost(vh_p[0])
self.vhosts.append(ssl_vhost)
# NOTE: Searches through Augeas seem to ruin changes to directives
# The configuration must also be saved before being searched
# for the new directives; For these reasons... this is tacked
# on after fully creating the new vhost
need_to_save = False
# See if the exact address appears in any other vhost
for addr in ssl_addrs:
for vhost in self.vhosts:
if (ssl_vhost.filep != vhost.filep and addr in vhost.addrs and
not self.is_name_vhost(addr)):
self.add_name_vhost(addr)
logger.info("Enabling NameVirtualHosts on %s", addr)
need_to_save = True
if need_to_save:
self.save()
return ssl_vhost
def supported_enhancements(self): # pylint: disable=no-self-use
"""Returns currently supported enhancements."""
return ["redirect"]
def enhance(self, domain, enhancement, options=None):
"""Enhance configuration.
:param str domain: domain to enhance
:param str enhancement: enhancement type defined in
:const:`~letsencrypt.constants.ENHANCEMENTS`
:param options: options for the enhancement
See :const:`~letsencrypt.constants.ENHANCEMENTS`
documentation for appropriate parameter.
"""
try:
return self._enhance_func[enhancement](
self.choose_vhost(domain), options)
except ValueError:
raise errors.PluginError(
"Unsupported enhancement: {}".format(enhancement))
except errors.PluginError:
logger.warn("Failed %s for %s", enhancement, domain)
def _enable_redirect(self, ssl_vhost, unused_options):
"""Redirect all equivalent HTTP traffic to ssl_vhost.
.. todo:: This enhancement should be rewritten and will
unfortunately require lots of debugging by hand.
Adds Redirect directive to the port 80 equivalent of ssl_vhost
First the function attempts to find the vhost with equivalent
ip addresses that serves on non-ssl ports
The function then adds the directive
.. note:: This function saves the configuration
:param ssl_vhost: Destination of traffic, an ssl enabled vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:param unused_options: Not currently used
:type unused_options: Not Available
:returns: Success, general_vhost (HTTP vhost)
:rtype: (bool, :class:`~letsencrypt_apache.obj.VirtualHost`)
"""
if not self.mod_loaded("rewrite_module"):
self.enable_mod("rewrite")
general_v = self._general_vhost(ssl_vhost)
if general_v is None:
# Add virtual_server with redirect
logger.debug(
"Did not find http version of ssl virtual host... creating")
return self._create_redirect_vhost(ssl_vhost)
else:
# Check if redirection already exists
exists, code = self._existing_redirect(general_v)
if exists:
if code == 0:
logger.debug("Redirect already added")
logger.info(
"Configuration is already redirecting traffic to HTTPS")
return
else:
logger.info("Unknown redirect exists for this vhost")
raise errors.PluginError(
"Unknown redirect already exists "
"in {}".format(general_v.filep))
# Add directives to server
self.parser.add_dir(general_v.path, "RewriteEngine", "On")
self.parser.add_dir(general_v.path, "RewriteRule",
constants.REWRITE_HTTPS_ARGS)
self.save_notes += ("Redirecting host in %s to ssl vhost in %s\n" %
(general_v.filep, ssl_vhost.filep))
self.save()
logger.info("Redirecting vhost in %s to ssl vhost in %s",
general_v.filep, ssl_vhost.filep)
def _existing_redirect(self, vhost):
"""Checks to see if existing redirect is in place.
Checks to see if virtualhost already contains a rewrite or redirect
returns boolean, integer
The boolean indicates whether the redirection exists...
The integer has the following code:
0 - Existing letsencrypt https rewrite rule is appropriate and in place
1 - Virtual host contains a Redirect directive
2 - Virtual host contains an unknown RewriteRule
-1 is also returned in case of no redirection/rewrite directives
:param vhost: vhost to check
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: Success, code value... see documentation
:rtype: bool, int
"""
rewrite_path = self.parser.find_dir(
parser.case_i("RewriteRule"), None, vhost.path)
redirect_path = self.parser.find_dir(
parser.case_i("Redirect"), None, vhost.path)
if redirect_path:
# "Existing Redirect directive for virtualhost"
return True, 1
if not rewrite_path:
# "No existing redirection for virtualhost"
return False, -1
if len(rewrite_path) == len(constants.REWRITE_HTTPS_ARGS):
for idx, match in enumerate(rewrite_path):
if (self.aug.get(match) !=
constants.REWRITE_HTTPS_ARGS[idx]):
# Not a letsencrypt https rewrite
return True, 2
# Existing letsencrypt https rewrite rule is in place
return True, 0
# Rewrite path exists but is not a letsencrypt https rule
return True, 2
def _create_redirect_vhost(self, ssl_vhost):
"""Creates an http_vhost specifically to redirect for the ssl_vhost.
:param ssl_vhost: ssl vhost
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: tuple of the form
(`success`, :class:`~letsencrypt_apache.obj.VirtualHost`)
:rtype: tuple
"""
# Consider changing this to a dictionary check
# Make sure adding the vhost will be safe
conflict, host_or_addrs = self._conflicting_host(ssl_vhost)
if conflict:
raise errors.PluginError(
"Unable to create a redirection vhost - {}".format(
host_or_addrs))
redirect_addrs = host_or_addrs
# get servernames and serveraliases
serveralias = ""
servername = ""
size_n = len(ssl_vhost.names)
if size_n > 0:
servername = "ServerName " + ssl_vhost.names[0]
if size_n > 1:
serveralias = " ".join(ssl_vhost.names[1:size_n])
serveralias = "ServerAlias " + serveralias
redirect_file = ("<VirtualHost" + redirect_addrs + ">\n"
"%s \n"
"%s \n"
"ServerSignature Off\n"
"\n"
"RewriteEngine On\n"
"RewriteRule %s\n"
"\n"
"ErrorLog /var/log/apache2/redirect.error.log\n"
"LogLevel warn\n"
"</VirtualHost>\n"
% (servername, serveralias,
" ".join(constants.REWRITE_HTTPS_ARGS)))
# Write out the file
# This is the default name
redirect_filename = "le-redirect.conf"
# See if a more appropriate name can be applied
if len(ssl_vhost.names) > 0:
# Sanity check...
# make sure servername doesn't exceed filename length restriction
if ssl_vhost.names[0] < (255-23):
redirect_filename = "le-redirect-%s.conf" % ssl_vhost.names[0]
redirect_filepath = os.path.join(
self.parser.root, "sites-available", redirect_filename)
# Register the new file that will be created
# Note: always register the creation before writing to ensure file will
# be removed in case of unexpected program exit
self.reverter.register_file_creation(False, redirect_filepath)
# Write out file
with open(redirect_filepath, "w") as redirect_fd:
redirect_fd.write(redirect_file)
logger.info("Created redirect file: %s", redirect_filename)
self.aug.load()
# Make a new vhost data structure and add it to the lists
new_vhost = self._create_vhost(parser.get_aug_path(redirect_filepath))
self.vhosts.append(new_vhost)
# Finally create documentation for the change
self.save_notes += ("Created a port 80 vhost, %s, for redirection to "
"ssl vhost %s\n" %
(new_vhost.filep, ssl_vhost.filep))
def _conflicting_host(self, ssl_vhost):
"""Checks for conflicting HTTP vhost for ssl_vhost.
Checks for a conflicting host, such that a new port 80 host could not
be created without ruining the apache config
Used with redirection
returns: conflict, host_or_addrs - boolean
if conflict: returns conflicting vhost
if not conflict: returns space separated list of new host addrs
:param ssl_vhost: SSL Vhost to check for possible port 80 redirection
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: TODO
:rtype: TODO
"""
# Consider changing this to a dictionary check
redirect_addrs = ""
for ssl_a in ssl_vhost.addrs:
# Add space on each new addr, combine "VirtualHost"+redirect_addrs
redirect_addrs = redirect_addrs + " "
ssl_a_vhttp = ssl_a.get_addr_obj("80")
# Search for a conflicting host...
for vhost in self.vhosts:
if vhost.enabled:
if (ssl_a_vhttp in vhost.addrs or
ssl_a.get_addr_obj("") in vhost.addrs or
ssl_a.get_addr_obj("*") in vhost.addrs):
# We have found a conflicting host... just return
return True, vhost
redirect_addrs = redirect_addrs + ssl_a_vhttp
return False, redirect_addrs
def _general_vhost(self, ssl_vhost):
"""Find appropriate HTTP vhost for ssl_vhost.
Function needs to be thoroughly tested and perhaps improved
Will not do well with malformed configurations
Consider changing this into a dict check
:param ssl_vhost: ssl vhost to check
:type ssl_vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: HTTP vhost or None if unsuccessful
:rtype: :class:`~letsencrypt_apache.obj.VirtualHost` or ``None``
"""
# _default_:443 check
# Instead... should look for vhost of the form *:80
# Should we prompt the user?
ssl_addrs = ssl_vhost.addrs
if ssl_addrs == common.Addr.fromstring("_default_:443"):
ssl_addrs = [common.Addr.fromstring("*:443")]
for vhost in self.vhosts:
found = 0
# Not the same vhost, and same number of addresses
if vhost != ssl_vhost and len(vhost.addrs) == len(ssl_vhost.addrs):
# Find each address in ssl_host in test_host
for ssl_a in ssl_addrs:
for test_a in vhost.addrs:
if test_a.get_addr() == ssl_a.get_addr():
# Check if found...
if (test_a.get_port() == "80" or
test_a.get_port() == "" or
test_a.get_port() == "*"):
found += 1
break
# Check to make sure all addresses were found
# and names are equal
if (found == len(ssl_vhost.addrs) and
vhost.names == ssl_vhost.names):
return vhost
return None
def get_all_certs_keys(self):
"""Find all existing keys, certs from configuration.
Retrieve all certs and keys set in VirtualHosts on the Apache server
:returns: list of tuples with form [(cert, key, path)]
cert - str path to certificate file
key - str path to associated key file
path - File path to configuration file.
:rtype: list
"""
c_k = set()
for vhost in self.vhosts:
if vhost.ssl:
cert_path = self.parser.find_dir(
parser.case_i("SSLCertificateFile"), None, vhost.path)
key_path = self.parser.find_dir(
parser.case_i("SSLCertificateKeyFile"), None, vhost.path)
# Can be removed once find directive can return ordered results
if len(cert_path) != 1 or len(key_path) != 1:
logger.error("Too many cert or key directives in vhost %s",
vhost.filep)
errors.MisconfigurationError(
"Too many cert/key directives in vhost")
cert = os.path.abspath(self.aug.get(cert_path[0]))
key = os.path.abspath(self.aug.get(key_path[0]))
c_k.add((cert, key, get_file_path(cert_path[0])))
return c_k
def is_site_enabled(self, avail_fp):
"""Checks to see if the given site is enabled.
.. todo:: fix hardcoded sites-enabled, check os.path.samefile
:param str avail_fp: Complete file path of available site
:returns: Success
:rtype: bool
"""
enabled_dir = os.path.join(self.parser.root, "sites-enabled")
for entry in os.listdir(enabled_dir):
if os.path.realpath(os.path.join(enabled_dir, entry)) == avail_fp:
return True
return False
def enable_site(self, vhost):
"""Enables an available site, Apache restart required.
.. todo:: This function should number subdomains before the domain vhost
.. todo:: Make sure link is not broken...
:param vhost: vhost to enable
:type vhost: :class:`~letsencrypt_apache.obj.VirtualHost`
:returns: Success
:rtype: bool
"""
if self.is_site_enabled(vhost.filep):
return True
if "/sites-available/" in vhost.filep:
enabled_path = ("%s/sites-enabled/%s" %
(self.parser.root, os.path.basename(vhost.filep)))
self.reverter.register_file_creation(False, enabled_path)
os.symlink(vhost.filep, enabled_path)
vhost.enabled = True
logger.info("Enabling available site: %s", vhost.filep)
self.save_notes += "Enabled site %s\n" % vhost.filep
return True
return False
def enable_mod(self, mod_name):
"""Enables module in Apache.
Both enables and restarts Apache so module is active.
:param str mod_name: Name of the module to enable.
"""
try:
# Use check_output so the command will finish before reloading
# TODO: a2enmod is debian specific...
subprocess.check_call([self.conf("enmod"), mod_name],
stdout=open("/dev/null", "w"),
stderr=open("/dev/null", "w"))
apache_restart(self.conf("init"))
except (OSError, subprocess.CalledProcessError):
logger.exception("Error enabling mod_%s", mod_name)
raise errors.MisconfigurationError(
"Missing enable_mod binary or lack privileges")
def mod_loaded(self, module):
"""Checks to see if mod_ssl is loaded
Uses ``apache_ctl`` to get loaded module list. This also effectively
serves as a config_test.
:returns: If ssl_module is included and active in Apache
:rtype: bool
"""
try:
proc = subprocess.Popen(
[self.conf("ctl"), "-M"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
except (OSError, ValueError):
logger.error(
"Error accessing %s for loaded modules!", self.conf("ctl"))
raise errors.MisconfigurationError("Error accessing loaded modules")
# Small errors that do not impede
if proc.returncode != 0:
logger.warn("Error in checking loaded module list: %s", stderr)
raise errors.MisconfigurationError(
"Apache is unable to check whether or not the module is "
"loaded because Apache is misconfigured.")
if module in stdout:
return True
return False
def restart(self):
"""Restarts apache server.
:returns: Success
:rtype: bool
"""
return apache_restart(self.conf("init-script"))
def config_test(self): # pylint: disable=no-self-use
"""Check the configuration of Apache for errors.
:returns: Success
:rtype: bool
"""
try:
proc = subprocess.Popen(
[self.conf("ctl"), "configtest"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
except (OSError, ValueError):
logger.fatal("Unable to run /usr/sbin/apache2ctl configtest")
raise errors.PluginError("Unable to run apache2ctl")
if proc.returncode != 0:
# Enter recovery routine...
logger.error("Configtest failed\n%s\n%s", stdout, stderr)
return False
return True
def verify_setup(self):
"""Verify the setup to ensure safe operating environment.
Make sure that files/directories are setup with appropriate permissions
Aim for defensive coding... make sure all input files
have permissions of root
"""
uid = os.geteuid()
le_util.make_or_verify_dir(
self.config.config_dir, core_constants.CONFIG_DIRS_MODE, uid)
le_util.make_or_verify_dir(
self.config.work_dir, core_constants.CONFIG_DIRS_MODE, uid)
le_util.make_or_verify_dir(
self.config.backup_dir, core_constants.CONFIG_DIRS_MODE, uid)
def get_version(self):
"""Return version of Apache Server.
Version is returned as tuple. (ie. 2.4.7 = (2, 4, 7))
:returns: version
:rtype: tuple
:raises .PluginError: if unable to find Apache version
"""
try:
proc = subprocess.Popen(
[self.conf("ctl"), "-v"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
text = proc.communicate()[0]
except (OSError, ValueError):
raise errors.PluginError(
"Unable to run %s -v" % self.conf("ctl"))
regex = re.compile(r"Apache/([0-9\.]*)", re.IGNORECASE)
matches = regex.findall(text)
if len(matches) != 1:
raise errors.PluginError("Unable to find Apache version")
return tuple([int(i) for i in matches[0].split(".")])
def more_info(self):
"""Human-readable string to help understand the module"""
return (
"Configures Apache to authenticate and install HTTPS.{0}"
"Server root: {root}{0}"
"Version: {version}".format(
os.linesep, root=self.parser.loc["root"],
version=".".join(str(i) for i in self.version))
)
###########################################################################
# Challenges Section
###########################################################################
def get_chall_pref(self, unused_domain): # pylint: disable=no-self-use
"""Return list of challenge preferences."""
return [challenges.DVSNI]
def perform(self, achalls):
"""Perform the configuration related challenge.
This function currently assumes all challenges will be fulfilled.
If this turns out not to be the case in the future. Cleanup and
outstanding challenges will have to be designed better.
"""
self._chall_out += len(achalls)
responses = [None] * len(achalls)
apache_dvsni = dvsni.ApacheDvsni(self)
for i, achall in enumerate(achalls):
if isinstance(achall, achallenges.DVSNI):
# Currently also have dvsni hold associated index
# of the challenge. This helps to put all of the responses back
# together when they are all complete.
apache_dvsni.add_chall(achall, i)
sni_response = apache_dvsni.perform()
if sni_response:
# Must restart in order to activate the challenges.
# Handled here because we may be able to load up other challenge
# types
self.restart()
# Go through all of the challenges and assign them to the proper
# place in the responses return value. All responses must be in the
# same order as the original challenges.
for i, resp in enumerate(sni_response):
responses[apache_dvsni.indices[i]] = resp
return responses
def cleanup(self, achalls):
"""Revert all challenges."""
self._chall_out -= len(achalls)
# If all of the challenges have been finished, clean up everything
if self._chall_out <= 0:
self.revert_challenge_config()
self.restart()
def apache_restart(apache_init_script):
"""Restarts the Apache Server.
:param str apache_init_script: Path to the Apache init script.
.. todo:: Try to use reload instead. (This caused timing problems before)
.. todo:: On failure, this should be a recovery_routine call with another
restart. This will confuse and inhibit developers from testing code
though. This change should happen after
the ApacheConfigurator has been thoroughly tested. The function will
need to be moved into the class again. Perhaps
this version can live on... for testing purposes.
"""
try:
proc = subprocess.Popen([apache_init_script, "restart"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
# Enter recovery routine...
logger.error("Apache Restart Failed!\n%s\n%s", stdout, stderr)
return False
except (OSError, ValueError):
logger.fatal(
"Apache Restart Failed - Please Check the Configuration")
raise errors.MisconfigurationError("Unable to restart Apache process")
return True
def get_file_path(vhost_path):
"""Get file path from augeas_vhost_path.
Takes in Augeas path and returns the file name
:param str vhost_path: Augeas virtual host path
:returns: filename of vhost
:rtype: str
"""
# Strip off /files
avail_fp = vhost_path[6:]
# This can be optimized...
while True:
# Cast both to lowercase to be case insensitive
find_if = avail_fp.lower().find("/ifmodule")
if find_if != -1:
avail_fp = avail_fp[:find_if]
continue
find_vh = avail_fp.lower().find("/virtualhost")
if find_vh != -1:
avail_fp = avail_fp[:find_vh]
continue
break
return avail_fp
def temp_install(options_ssl):
"""Temporary install for convenience."""
# WARNING: THIS IS A POTENTIAL SECURITY VULNERABILITY
# THIS SHOULD BE HANDLED BY THE PACKAGE MANAGER
# AND TAKEN OUT BEFORE RELEASE, INSTEAD
# SHOWING A NICE ERROR MESSAGE ABOUT THE PROBLEM.
# Check to make sure options-ssl.conf is installed
if not os.path.isfile(options_ssl):
shutil.copyfile(constants.MOD_SSL_CONF_SRC, options_ssl)
|
{
"content_hash": "33ab00256199ec71b657685f20063c67",
"timestamp": "",
"source": "github",
"line_count": 1186,
"max_line_length": 80,
"avg_line_length": 38.822091062394605,
"alnum_prop": 0.5892535238798514,
"repo_name": "tdfischer/lets-encrypt-preview",
"id": "c8083b4064fa0b099b6208c3f4acfb8e3fc633a9",
"size": "46043",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "letsencrypt-apache/letsencrypt_apache/configurator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "13069"
},
{
"name": "DIGITAL Command Language",
"bytes": "133"
},
{
"name": "Groff",
"bytes": "222"
},
{
"name": "Nginx",
"bytes": "4274"
},
{
"name": "Python",
"bytes": "937820"
},
{
"name": "Shell",
"bytes": "9179"
}
],
"symlink_target": ""
}
|
import tests.periodicities.period_test as per
per.buildModel((60 , 'H' , 400));
|
{
"content_hash": "f5106835d367da98dc546ef6f2ceb046",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 45,
"avg_line_length": 20.5,
"alnum_prop": 0.7073170731707317,
"repo_name": "antoinecarme/pyaf",
"id": "5a87e2c16fe49a56ae3998d0fb66eba99aeeae1b",
"size": "82",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/periodicities/Hour/Cycle_Hour_400_H_60.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from textwrap import dedent
import tempfile
import yaml
import os
import subprocess as sp
import conda_build.api
def ensure_missing(package):
"""
Delete a package if it exists and re-index the conda-bld dir.
If a package is deleted from the conda-bld directory but conda-index is not
re-run, it remains in the metadata (.index.json, repodata.json) files and
appears to conda as if the recipe still exists. This ensures that the
package is deleted and is removed from the index. Useful for test cases.
Parameters
----------
package : str
Path to tarball of built package. If all you have is a recipe path, use
`built_package_path()` to get the tarball path.
"""
if os.path.exists(package):
os.unlink(package)
assert not os.path.exists(package)
conda_build.api.update_index([os.path.dirname(os.path.dirname(package))])
class Recipes(object):
def __init__(self, data, from_string=False):
"""
Handles the creation of a directory of recipes.
This class, combined with YAML files describing test cases, can be used
for building test cases of interdependent recipes in an isolated
directory.
Recipes are specified in a YAML file. Each top-level key represents
a recipe, and the recipe will be written in a temp dir named after that
key. Sub-keys are filenames to create in that directory, and the value
of each sub-key is a string (likely a multi-line string indicated with
a "|").
For example, this YAML file::
one:
meta.yaml: |
package:
name: one
version: 0.1
build.sh: |
#!/bin/bash
# do installation
two:
meta.yaml: |
package:
name: two
version: 0.1
build.sh:
#!/bin/bash
python setup.py install
will result in these files::
/tmp/tmpdirname/
one/
meta.yaml
build.sh
two/
meta.yaml
build.sh
Parameters
----------
data : str
If `from_string` is False, this is a filename relative to this
module's file. If `from_string` is True, then use the contents of
the string directly.
from_string : bool
Useful attributes:
* recipes: a dict mapping recipe names to parsed meta.yaml contents
* basedir: the tempdir containing all recipes. Many bioconda-utils
functions need the "recipes dir"; that's this basedir.
* recipe_dirs: a dict mapping recipe names to newly-created recipe
dirs. These are full paths to subdirs in `basedir`.
"""
if from_string:
self.data = dedent(data)
self.recipes = yaml.safe_load(data)
else:
self.data = os.path.join(os.path.dirname(__file__), data)
self.recipes = yaml.safe_load(open(self.data))
def write_recipes(self):
basedir = tempfile.mkdtemp()
self.recipe_dirs = {}
for name, recipe in self.recipes.items():
rdir = os.path.join(basedir, name)
os.makedirs(rdir)
self.recipe_dirs[name] = rdir
for key, value in recipe.items():
with open(os.path.join(rdir, key), 'w') as fout:
fout.write(value)
self.basedir = basedir
@property
def recipe_dirnames(self):
return list(self.recipe_dirs.values())
|
{
"content_hash": "5bb47e9ff42970284aaffd5cac48d149",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 79,
"avg_line_length": 32.025862068965516,
"alnum_prop": 0.5695827725437416,
"repo_name": "bioconda/bioconda-utils",
"id": "35a78b07ba9da8428a87ef254cb0ad78fa8e1993",
"size": "3715",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/helpers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5624"
},
{
"name": "Dockerfile",
"bytes": "2433"
},
{
"name": "HTML",
"bytes": "5508"
},
{
"name": "Python",
"bytes": "709484"
},
{
"name": "Shell",
"bytes": "2952"
}
],
"symlink_target": ""
}
|
from celery.utils.log import get_task_logger
from celerydemo import app
logger = get_task_logger(__name__)
print("other's logger is %s" % logger)
@app.task
def multi(x, y):
logger.info('x * y')
return x * y
|
{
"content_hash": "0801450e36d01b138360b3c2bc0e2606",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 44,
"avg_line_length": 16.923076923076923,
"alnum_prop": 0.6636363636363637,
"repo_name": "hugoxia/Python",
"id": "16ac71b79017301b0b8d2eb249afb49ee724bf27",
"size": "220",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "celerydemo/other.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "394"
},
{
"name": "HTML",
"bytes": "4511"
},
{
"name": "JavaScript",
"bytes": "1426"
},
{
"name": "Python",
"bytes": "91850"
},
{
"name": "Shell",
"bytes": "217"
}
],
"symlink_target": ""
}
|
"""Worry-free YAML configuration files.
"""
from __future__ import unicode_literals
import platform
import os
import pkgutil
import sys
import yaml
import types
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
UNIX_DIR_VAR = 'XDG_CONFIG_HOME'
UNIX_DIR_FALLBACK = '~/.config'
WINDOWS_DIR_VAR = 'APPDATA'
WINDOWS_DIR_FALLBACK = '~\\AppData\\Roaming'
MAC_DIR = '~/Library/Application Support'
CONFIG_FILENAME = 'config.yaml'
DEFAULT_FILENAME = 'config_default.yaml'
ROOT_NAME = 'root'
YAML_TAB_PROBLEM = "found character '\\t' that cannot start any token"
# Utilities.
PY3 = sys.version_info[0] == 3
STRING = str if PY3 else unicode
BASESTRING = str if PY3 else basestring
NUMERIC_TYPES = (int, float) if PY3 else (int, float, long)
TYPE_TYPES = (type,) if PY3 else (type, types.ClassType)
def iter_first(sequence):
"""Get the first element from an iterable or raise a ValueError if
the iterator generates no values.
"""
it = iter(sequence)
try:
if PY3:
return next(it)
else:
return it.next()
except StopIteration:
raise ValueError()
# Exceptions.
class ConfigError(Exception):
"""Base class for exceptions raised when querying a configuration.
"""
class NotFoundError(ConfigError):
"""A requested value could not be found in the configuration trees.
"""
class ConfigTypeError(ConfigError, TypeError):
"""The value in the configuration did not match the expected type.
"""
class ConfigValueError(ConfigError, ValueError):
"""The value in the configuration is illegal."""
class ConfigReadError(ConfigError):
"""A configuration file could not be read."""
def __init__(self, filename, reason=None):
self.filename = filename
self.reason = reason
message = 'file {0} could not be read'.format(filename)
if isinstance(reason, yaml.scanner.ScannerError) and \
reason.problem == YAML_TAB_PROBLEM:
# Special-case error message for tab indentation in YAML markup.
message += ': found tab character at line {0}, column {1}'.format(
reason.problem_mark.line + 1,
reason.problem_mark.column + 1,
)
elif reason:
# Generic error message uses exception's message.
message += ': {0}'.format(reason)
super(ConfigReadError, self).__init__(message)
# Views and sources.
class ConfigSource(dict):
"""A dictionary augmented with metadata about the source of the
configuration.
"""
def __init__(self, value, filename=None, default=False):
super(ConfigSource, self).__init__(value)
if filename is not None and not isinstance(filename, BASESTRING):
raise TypeError('filename must be a string or None')
self.filename = filename
self.default = default
def __repr__(self):
return 'ConfigSource({0}, {1}, {2})'.format(
super(ConfigSource, self).__repr__(),
repr(self.filename),
repr(self.default)
)
@classmethod
def of(self, value):
"""Given either a dictionary or a `ConfigSource` object, return
a `ConfigSource` object. This lets a function accept either type
of object as an argument.
"""
if isinstance(value, ConfigSource):
return value
elif isinstance(value, dict):
return ConfigSource(value)
else:
raise TypeError('source value must be a dict')
class ConfigView(object):
"""A configuration "view" is a query into a program's configuration
data. A view represents a hypothetical location in the configuration
tree; to extract the data from the location, a client typically
calls the ``view.get()`` method. The client can access children in
the tree (subviews) by subscripting the parent view (i.e.,
``view[key]``).
"""
name = None
"""The name of the view, depicting the path taken through the
configuration in Python-like syntax (e.g., ``foo['bar'][42]``).
"""
def resolve(self):
"""The core (internal) data retrieval method. Generates (value,
source) pairs for each source that contains a value for this
view. May raise ConfigTypeError if a type error occurs while
traversing a source.
"""
raise NotImplementedError
def first(self):
"""Return a (value, source) pair for the first object found for
this view. This amounts to the first element returned by
`resolve`. If no values are available, a NotFoundError is
raised.
"""
pairs = self.resolve()
try:
return iter_first(pairs)
except ValueError:
raise NotFoundError("{0} not found".format(self.name))
def exists(self):
"""Determine whether the view has a setting in any source.
"""
try:
self.first()
except NotFoundError:
return False
return True
def add(self, value):
"""Set the *default* value for this configuration view. The
specified value is added as the lowest-priority configuration
data source.
"""
raise NotImplementedError
def set(self, value):
"""*Override* the value for this configuration view. The
specified value is added as the highest-priority configuration
data source.
"""
raise NotImplementedError
def root(self):
"""The RootView object from which this view is descended.
"""
raise NotImplementedError
def __repr__(self):
return '<ConfigView: %s>' % self.name
def __getitem__(self, key):
"""Get a subview of this view."""
return Subview(self, key)
def __setitem__(self, key, value):
"""Create an overlay source to assign a given key under this
view.
"""
self.set({key: value})
def set_args(self, namespace):
"""Overlay parsed command-line arguments, generated by a library
like argparse or optparse, onto this view's value.
"""
args = {}
for key, value in namespace.__dict__.items():
if value is not None: # Avoid unset options.
args[key] = value
self.set(args)
# Magical conversions. These special methods make it possible to use
# View objects somewhat transparently in certain circumstances. For
# example, rather than using ``view.get(bool)``, it's possible to
# just say ``bool(view)`` or use ``view`` in a conditional.
def __str__(self):
"""Gets the value for this view as a byte string."""
return str(self.get())
def __unicode__(self):
"""Gets the value for this view as a unicode string. (Python 2
only.)
"""
return unicode(self.get())
def __nonzero__(self):
"""Gets the value for this view as a boolean. (Python 2 only.)
"""
return self.__bool__()
def __bool__(self):
"""Gets the value for this view as a boolean. (Python 3 only.)
"""
return bool(self.get())
# Dictionary emulation methods.
def keys(self):
"""Returns a list containing all the keys available as subviews
of the current views. This enumerates all the keys in *all*
dictionaries matching the current view, in contrast to
``view.get(dict).keys()``, which gets all the keys for the
*first* dict matching the view. If the object for this view in
any source is not a dict, then a ConfigTypeError is raised. The
keys are ordered according to how they appear in each source.
"""
keys = []
for dic, _ in self.resolve():
try:
cur_keys = dic.keys()
except AttributeError:
raise ConfigTypeError(
'{0} must be a dict, not {1}'.format(
self.name, type(dic).__name__
)
)
for key in cur_keys:
if key not in keys:
keys.append(key)
return keys
def items(self):
"""Iterates over (key, subview) pairs contained in dictionaries
from *all* sources at this view. If the object for this view in
any source is not a dict, then a ConfigTypeError is raised.
"""
for key in self.keys():
yield key, self[key]
def values(self):
"""Iterates over all the subviews contained in dictionaries from
*all* sources at this view. If the object for this view in any
source is not a dict, then a ConfigTypeError is raised.
"""
for key in self.keys():
yield self[key]
# List/sequence emulation.
def all_contents(self):
"""Iterates over all subviews from collections at this view from
*all* sources. If the object for this view in any source is not
iterable, then a ConfigTypeError is raised. This method is
intended to be used when the view indicates a list; this method
will concatenate the contents of the list from all sources.
"""
for collection, _ in self.resolve():
try:
it = iter(collection)
except TypeError:
raise ConfigTypeError(
'{0} must be an iterable, not {1}'.format(
self.name, type(collection).__name__
)
)
for value in it:
yield value
# Validation and conversion.
def get(self, typ=None):
"""Returns the canonical value for the view, checked against the
passed-in type. If the value is not an instance of the given
type, a ConfigTypeError is raised. May also raise a
NotFoundError.
"""
value, _ = self.first()
if typ is not None:
if not isinstance(typ, TYPE_TYPES):
raise TypeError('argument to get() must be a type')
if not isinstance(value, typ):
raise ConfigTypeError(
"{0} must be of type {1}, not {2}".format(
self.name, typ.__name__, type(value).__name__
)
)
return value
def as_filename(self):
"""Get a string as a normalized as an absolute, tilde-free path.
Relative paths are relative to the configuration directory (see
the `config_dir` method) if they come from a file. Otherwise,
they are relative to the current working directory. This helps
attain the expected behavior when using command-line options.
"""
path, source = self.first()
if not isinstance(path, BASESTRING):
raise ConfigTypeError('{0} must be a filename, not {1}'.format(
self.name, type(path).__name__
))
path = os.path.expanduser(STRING(path))
if not os.path.isabs(path) and source.filename:
# From defaults: relative to the app's directory.
path = os.path.join(self.root().config_dir(), path)
return os.path.abspath(path)
def as_choice(self, choices):
"""Ensure that the value is among a collection of choices and
return it. If `choices` is a dictionary, then return the
corresponding value rather than the value itself (the key).
"""
value = self.get()
if value not in choices:
raise ConfigValueError(
'{0} must be one of {1}, not {2}'.format(
self.name, repr(list(choices)), repr(value)
)
)
if isinstance(choices, dict):
return choices[value]
else:
return value
def as_number(self):
"""Ensure that a value is of numeric type."""
value = self.get()
if isinstance(value, NUMERIC_TYPES):
return value
raise ConfigTypeError(
'{0} must be numeric, not {1}'.format(
self.name, type(value).__name__
)
)
def as_str_seq(self):
"""Get the value as a list of strings. The underlying configured
value can be a sequence or a single string. In the latter case,
the string is treated as a white-space separated list of words.
"""
value = self.get()
if isinstance(value, bytes):
value = value.decode('utf8', 'ignore')
if isinstance(value, STRING):
return value.split()
else:
try:
return list(value)
except TypeError:
raise ConfigTypeError(
'{0} must be a whitespace-separated string or '
'a list'.format(self.name)
)
def flatten(self):
"""Create a hierarchy of OrderedDicts containing the data from
this view, recursively reifying all views to get their
represented values.
"""
od = OrderedDict()
for key, view in self.items():
try:
od[key] = view.flatten()
except ConfigTypeError:
od[key] = view.get()
return od
class RootView(ConfigView):
"""The base of a view hierarchy. This view keeps track of the
sources that may be accessed by subviews.
"""
def __init__(self, sources):
"""Create a configuration hierarchy for a list of sources. At
least one source must be provided. The first source in the list
has the highest priority.
"""
self.sources = list(sources)
self.name = ROOT_NAME
def add(self, obj):
self.sources.append(ConfigSource.of(obj))
def set(self, value):
self.sources.insert(0, ConfigSource.of(value))
def resolve(self):
return ((dict(s), s) for s in self.sources)
def clear(self):
"""Remove all sources from this configuration."""
del self.sources[:]
def root(self):
return self
class Subview(ConfigView):
"""A subview accessed via a subscript of a parent view."""
def __init__(self, parent, key):
"""Make a subview of a parent view for a given subscript key.
"""
self.parent = parent
self.key = key
# Choose a human-readable name for this view.
if isinstance(self.parent, RootView):
self.name = ''
else:
self.name = self.parent.name
if not isinstance(self.key, int):
self.name += '.'
if isinstance(self.key, int):
self.name += '#{0}'.format(self.key)
elif isinstance(self.key, BASESTRING):
self.name += '{0}'.format(self.key)
else:
self.name += '{0}'.format(repr(self.key))
def resolve(self):
for collection, source in self.parent.resolve():
try:
value = collection[self.key]
except IndexError:
# List index out of bounds.
continue
except KeyError:
# Dict key does not exist.
continue
except TypeError:
# Not subscriptable.
raise ConfigTypeError(
"{0} must be a collection, not {1}".format(
self.parent.name, type(collection).__name__
)
)
yield value, source
def set(self, value):
self.parent.set({self.key: value})
def add(self, value):
self.parent.add({self.key: value})
def root(self):
return self.parent.root()
# Config file paths, including platform-specific paths and in-package
# defaults.
# Based on get_root_path from Flask by Armin Ronacher.
def _package_path(name):
"""Returns the path to the package containing the named module or
None if the path could not be identified (e.g., if
``name == "__main__"``).
"""
loader = pkgutil.get_loader(name)
if loader is None or name == '__main__':
return None
if hasattr(loader, 'get_filename'):
filepath = loader.get_filename(name)
else:
# Fall back to importing the specified module.
__import__(name)
filepath = sys.modules[name].__file__
return os.path.dirname(os.path.abspath(filepath))
def config_dirs():
"""Return a platform-specific list of candidates for user
configuration directories on the system.
The candidates are in order of priority, from highest to lowest. The
last element is the "fallback" location to be used when no
higher-priority config file exists.
"""
paths = []
if platform.system() == 'Darwin':
paths.append(MAC_DIR)
paths.append(UNIX_DIR_FALLBACK)
if UNIX_DIR_VAR in os.environ:
paths.append(os.environ[UNIX_DIR_VAR])
elif platform.system() == 'Windows':
paths.append(WINDOWS_DIR_FALLBACK)
if WINDOWS_DIR_VAR in os.environ:
paths.append(os.environ[WINDOWS_DIR_VAR])
else:
# Assume Unix.
paths.append(UNIX_DIR_FALLBACK)
if UNIX_DIR_VAR in os.environ:
paths.append(os.environ[UNIX_DIR_VAR])
# Expand and deduplicate paths.
out = []
for path in paths:
path = os.path.abspath(os.path.expanduser(path))
if path not in out:
out.append(path)
return out
# YAML loading.
class Loader(yaml.SafeLoader):
"""A customized YAML loader. This loader deviates from the official
YAML spec in a few convenient ways:
- All strings as are Unicode objects.
- All maps are OrderedDicts.
- Strings can begin with % without quotation.
"""
# All strings should be Unicode objects, regardless of contents.
def _construct_unicode(self, node):
return self.construct_scalar(node)
# Use ordered dictionaries for every YAML map.
# From https://gist.github.com/844388
def construct_yaml_map(self, node):
data = OrderedDict()
yield data
value = self.construct_mapping(node)
data.update(value)
def construct_mapping(self, node, deep=False):
if isinstance(node, yaml.MappingNode):
self.flatten_mapping(node)
else:
raise yaml.constructor.ConstructorError(
None, None,
'expected a mapping node, but found %s' % node.id,
node.start_mark
)
mapping = OrderedDict()
for key_node, value_node in node.value:
key = self.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
raise yaml.constructor.ConstructorError(
'while constructing a mapping',
node.start_mark, 'found unacceptable key (%s)' % exc,
key_node.start_mark
)
value = self.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
# Allow bare strings to begin with %. Directives are still detected.
def check_plain(self):
plain = super(Loader, self).check_plain()
return plain or self.peek() == '%'
Loader.add_constructor('tag:yaml.org,2002:str', Loader._construct_unicode)
Loader.add_constructor('tag:yaml.org,2002:map', Loader.construct_yaml_map)
Loader.add_constructor('tag:yaml.org,2002:omap', Loader.construct_yaml_map)
def load_yaml(filename):
"""Read a YAML document from a file. If the file cannot be read or
parsed, a ConfigReadError is raised.
"""
try:
with open(filename, 'r') as f:
return yaml.load(f, Loader=Loader)
except (IOError, yaml.error.YAMLError) as exc:
raise ConfigReadError(filename, exc)
# YAML dumping.
class Dumper(yaml.SafeDumper):
"""A PyYAML Dumper that represents OrderedDicts as ordinary mappings
(in order, of course).
"""
# From http://pyyaml.org/attachment/ticket/161/use_ordered_dict.py
def represent_mapping(self, tag, mapping, flow_style=None):
value = []
node = yaml.MappingNode(tag, value, flow_style=flow_style)
if self.alias_key is not None:
self.represented_objects[self.alias_key] = node
best_style = False
if hasattr(mapping, 'items'):
mapping = list(mapping.items())
for item_key, item_value in mapping:
node_key = self.represent_data(item_key)
node_value = self.represent_data(item_value)
if not (isinstance(node_key, yaml.ScalarNode)
and not node_key.style):
best_style = False
if not (isinstance(node_value, yaml.ScalarNode)
and not node_value.style):
best_style = False
value.append((node_key, node_value))
if flow_style is None:
if self.default_flow_style is not None:
node.flow_style = self.default_flow_style
else:
node.flow_style = best_style
return node
def represent_list(self, data):
"""If a list has less than 4 items, represent it in inline style
(i.e. comma separated, within square brackets).
"""
node = super(Dumper, self).represent_list(data)
length = len(data)
if self.default_flow_style is None and length < 4:
node.flow_style = True
elif self.default_flow_style is None:
node.flow_style = False
return node
def represent_bool(self, data):
"""Represent bool as 'yes' or 'no' instead of 'true' or 'false'.
"""
if data:
value = 'yes'
else:
value = 'no'
return self.represent_scalar('tag:yaml.org,2002:bool', value)
def represent_none(self, data):
"""Represent a None value with nothing instead of 'none'.
"""
return self.represent_scalar('tag:yaml.org,2002:null', '')
Dumper.add_representer(OrderedDict, Dumper.represent_dict)
Dumper.add_representer(bool, Dumper.represent_bool)
Dumper.add_representer(type(None), Dumper.represent_none)
Dumper.add_representer(list, Dumper.represent_list)
def restore_yaml_comments(data, default_data):
"""Scan default_data for comments (we include empty lines in our
definition of comments) and place them before the same keys in data.
Only works with comments that are on one or more own lines, i.e.
not next to a yaml mapping.
"""
comment_map = dict()
default_lines = iter(default_data.splitlines())
for line in default_lines:
if not line:
comment = "\n"
elif line.startswith("#"):
comment = "{0}\n".format(line)
else:
continue
while True:
line = next(default_lines)
if line and not line.startswith("#"):
break
comment += "{0}\n".format(line)
key = line.split(':')[0].strip()
comment_map[key] = comment
out_lines = iter(data.splitlines())
out_data = ""
for line in out_lines:
key = line.split(':')[0].strip()
if key in comment_map:
out_data += comment_map[key]
out_data += "{0}\n".format(line)
return out_data
# Main interface.
class Configuration(RootView):
def __init__(self, appname, modname=None, read=True):
"""Create a configuration object by reading the
automatically-discovered config files for the application for a
given name. If `modname` is specified, it should be the import
name of a module whose package will be searched for a default
config file. (Otherwise, no defaults are used.) Pass `False` for
`read` to disable automatic reading of all discovered
configuration files. Use this when creating a configuration
object at module load time and then call the `read` method
later.
"""
super(Configuration, self).__init__([])
self.appname = appname
self.modname = modname
self._env_var = '{0}DIR'.format(self.appname.upper())
if read:
self.read()
def user_config_path(self):
"""Points to the location of the user configuration.
The file may not exist.
"""
return os.path.join(self.config_dir(), CONFIG_FILENAME)
def _add_user_source(self):
"""Add the configuration options from the YAML file in the
user's configuration directory (given by `config_dir`) if it
exists.
"""
filename = self.user_config_path()
if os.path.isfile(filename):
self.add(ConfigSource(load_yaml(filename) or {}, filename))
def _add_default_source(self):
"""Add the package's default configuration settings. This looks
for a YAML file located inside the package for the module
`modname` if it was given.
"""
if self.modname:
pkg_path = _package_path(self.modname)
if pkg_path:
filename = os.path.join(pkg_path, DEFAULT_FILENAME)
if os.path.isfile(filename):
self.add(ConfigSource(load_yaml(filename), filename, True))
def read(self, user=True, defaults=True):
"""Find and read the files for this configuration and set them
as the sources for this configuration. To disable either
discovered user configuration files or the in-package defaults,
set `user` or `defaults` to `False`.
"""
if user:
self._add_user_source()
if defaults:
self._add_default_source()
def config_dir(self):
"""Get the path to the user configuration directory. The
directory is guaranteed to exist as a postcondition (one may be
created if none exist).
If the application's ``...DIR`` environment variable is set, it
is used as the configuration directory. Otherwise,
platform-specific standard configuration locations are searched
for a ``config.yaml`` file. If no configuration file is found, a
fallback path is used.
"""
# If environment variable is set, use it.
if self._env_var in os.environ:
appdir = os.environ[self._env_var]
appdir = os.path.abspath(os.path.expanduser(appdir))
if os.path.isfile(appdir):
raise ConfigError('{0} must be a directory'.format(
self._env_var
))
else:
# Search platform-specific locations. If no config file is
# found, fall back to the final directory in the list.
for confdir in config_dirs():
appdir = os.path.join(confdir, self.appname)
if os.path.isfile(os.path.join(appdir, CONFIG_FILENAME)):
break
# Ensure that the directory exists.
if not os.path.isdir(appdir):
os.makedirs(appdir)
return appdir
def set_file(self, filename):
"""Parses the file as YAML and inserts it into the configuration
sources with highest priority.
"""
filename = os.path.abspath(filename)
self.set(ConfigSource(load_yaml(filename), filename))
def dump(self, full=True):
"""Dump the Configuration object to a YAML file.
The order of the keys is determined from the default
configuration file. All keys not in the default configuration
will be appended to the end of the file.
:param filename: The file to dump the configuration to, or None
if the YAML string should be returned instead
:type filename: unicode
:param full: Dump settings that don't differ from the defaults
as well
"""
if full:
out_dict = self.flatten()
else:
# Exclude defaults when flattening.
sources = [s for s in self.sources if not s.default]
out_dict = RootView(sources).flatten()
yaml_out = yaml.dump(out_dict, Dumper=Dumper,
default_flow_style=None, indent=4,
width=1000)
# Restore comments to the YAML text.
default_source = None
for source in self.sources:
if source.default:
default_source = source
break
if default_source:
with open(default_source.filename, 'r') as fp:
default_data = fp.read()
yaml_out = restore_yaml_comments(yaml_out, default_data)
return yaml_out
class LazyConfig(Configuration):
"""A Configuration at reads files on demand when it is first
accessed. This is appropriate for using as a global config object at
the module level.
"""
def __init__(self, appname, modname=None):
super(LazyConfig, self).__init__(appname, modname, False)
self._materialized = False # Have we read the files yet?
self._lazy_prefix = [] # Pre-materialization calls to set().
self._lazy_suffix = [] # Calls to add().
def read(self, user=True, defaults=True):
self._materialized = True
super(LazyConfig, self).read(user, defaults)
def resolve(self):
if not self._materialized:
# Read files and unspool buffers.
self.read()
self.sources += self._lazy_suffix
self.sources[:0] = self._lazy_prefix
return super(LazyConfig, self).resolve()
def add(self, value):
super(LazyConfig, self).add(value)
if not self._materialized:
# Buffer additions to end.
self._lazy_suffix += self.sources
del self.sources[:]
def set(self, value):
super(LazyConfig, self).set(value)
if not self._materialized:
# Buffer additions to beginning.
self._lazy_prefix[:0] = self.sources
del self.sources[:]
def clear(self):
"""Remove all sources from this configuration."""
del self.sources[:]
self._lazy_suffix = []
self._lazy_prefix = []
|
{
"content_hash": "37297e7b74310d451bf541667f90bc29",
"timestamp": "",
"source": "github",
"line_count": 897,
"max_line_length": 79,
"avg_line_length": 33.96321070234114,
"alnum_prop": 0.5892663712456918,
"repo_name": "iamdankaufman/beets",
"id": "3693f39f4b29f133856b2ee88bbe310a5a41d86a",
"size": "31113",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "beets/util/confit.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
import mxnet as mx
import numpy as np
import os
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
class MApMetric(mx.metric.EvalMetric):
"""
Calculate mean AP for object detection task
Parameters:
---------
ovp_thresh : float
overlap threshold for TP
use_difficult : boolean
use difficult ground-truths if applicable, otherwise just ignore
class_names : list of str
optional, if provided, will print out AP for each class
pred_idx : int
prediction index in network output list
roc_output_path
optional, if provided, will save a ROC graph for each class
tensorboard_path
optional, if provided, will save a ROC graph to tensorboard
"""
def __init__(self, ovp_thresh=0.5, use_difficult=False, class_names=None,
pred_idx=0, roc_output_path=None, tensorboard_path=None):
super(MApMetric, self).__init__('mAP')
if class_names is None:
self.num = None
else:
assert isinstance(class_names, (list, tuple))
for name in class_names:
assert isinstance(name, str), "must provide names as str"
num = len(class_names)
self.name = class_names + ['mAP']
self.num = num + 1
self.reset()
self.ovp_thresh = ovp_thresh
self.use_difficult = use_difficult
self.class_names = class_names
self.pred_idx = int(pred_idx)
self.roc_output_path = roc_output_path
self.tensorboard_path = tensorboard_path
def save_roc_graph(self, recall=None, prec=None, classkey=1, path=None, ap=None):
if not os.path.exists(path):
os.mkdir(path)
plot_path = os.path.join(path, 'roc_'+self.class_names[classkey])
if os.path.exists(plot_path):
os.remove(plot_path)
fig = plt.figure()
plt.title(self.class_names[classkey])
plt.plot(recall, prec, 'b', label='AP = %0.2f' % ap)
plt.legend(loc='lower right')
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel('Precision')
plt.xlabel('Recall')
plt.savefig(plot_path)
plt.close(fig)
def reset(self):
"""Clear the internal statistics to initial state."""
if getattr(self, 'num', None) is None:
self.num_inst = 0
self.sum_metric = 0.0
else:
self.num_inst = [0] * self.num
self.sum_metric = [0.0] * self.num
self.records = dict()
self.counts = dict()
def get(self):
"""Get the current evaluation result.
Returns
-------
name : str
Name of the metric.
value : float
Value of the evaluation.
"""
self._update() # update metric at this time
if self.num is None:
if self.num_inst == 0:
return (self.name, float('nan'))
else:
return (self.name, self.sum_metric / self.num_inst)
else:
names = ['%s'%(self.name[i]) for i in range(self.num)]
values = [x / y if y != 0 else float('nan') \
for x, y in zip(self.sum_metric, self.num_inst)]
return (names, values)
def update(self, labels, preds):
"""
Update internal records. This function now only update internal buffer,
sum_metric and num_inst are updated in _update() function instead when
get() is called to return results.
Params:
----------
labels: mx.nd.array (n * 6) or (n * 5), difficult column is optional
2-d array of ground-truths, n objects(id-xmin-ymin-xmax-ymax-[difficult])
preds: mx.nd.array (m * 6)
2-d array of detections, m objects(id-score-xmin-ymin-xmax-ymax)
"""
def iou(x, ys):
"""
Calculate intersection-over-union overlap
Params:
----------
x : numpy.array
single box [xmin, ymin ,xmax, ymax]
ys : numpy.array
multiple box [[xmin, ymin, xmax, ymax], [...], ]
Returns:
-----------
numpy.array
[iou1, iou2, ...], size == ys.shape[0]
"""
ixmin = np.maximum(ys[:, 0], x[0])
iymin = np.maximum(ys[:, 1], x[1])
ixmax = np.minimum(ys[:, 2], x[2])
iymax = np.minimum(ys[:, 3], x[3])
iw = np.maximum(ixmax - ixmin, 0.)
ih = np.maximum(iymax - iymin, 0.)
inters = iw * ih
uni = (x[2] - x[0]) * (x[3] - x[1]) + (ys[:, 2] - ys[:, 0]) * \
(ys[:, 3] - ys[:, 1]) - inters
ious = inters / uni
ious[uni < 1e-12] = 0 # in case bad boxes
return ious
# independant execution for each image
for i in range(labels[0].shape[0]):
# get as numpy arrays
label = labels[0][i].asnumpy()
pred = preds[self.pred_idx][i].asnumpy()
# calculate for each class
while (pred.shape[0] > 0):
cid = int(pred[0, 0])
indices = np.where(pred[:, 0].astype(int) == cid)[0]
if cid < 0:
pred = np.delete(pred, indices, axis=0)
continue
dets = pred[indices]
pred = np.delete(pred, indices, axis=0)
# sort by score, desceding
dets[dets[:,1].argsort()[::-1]]
records = np.hstack((dets[:, 1][:, np.newaxis], np.zeros((dets.shape[0], 1))))
# ground-truths
label_indices = np.where(label[:, 0].astype(int) == cid)[0]
gts = label[label_indices, :]
label = np.delete(label, label_indices, axis=0)
if gts.size > 0:
found = [False] * gts.shape[0]
for j in range(dets.shape[0]):
# compute overlaps
ious = iou(dets[j, 2:], gts[:, 1:5])
ovargmax = np.argmax(ious)
ovmax = ious[ovargmax]
if ovmax > self.ovp_thresh:
if (not self.use_difficult and
gts.shape[1] >= 6 and
gts[ovargmax, 5] > 0):
pass
else:
if not found[ovargmax]:
records[j, -1] = 1 # tp
found[ovargmax] = True
else:
# duplicate
records[j, -1] = 2 # fp
else:
records[j, -1] = 2 # fp
else:
# no gt, mark all fp
records[:, -1] = 2
# ground truth count
if (not self.use_difficult and gts.shape[1] >= 6):
gt_count = np.sum(gts[:, 5] < 1)
else:
gt_count = gts.shape[0]
# now we push records to buffer
# first column: score, second column: tp/fp
# 0: not set(matched to difficult or something), 1: tp, 2: fp
records = records[np.where(records[:, -1] > 0)[0], :]
if records.size > 0:
self._insert(cid, records, gt_count)
# add missing class if not present in prediction
while (label.shape[0] > 0):
cid = int(label[0, 0])
label_indices = np.where(label[:, 0].astype(int) == cid)[0]
label = np.delete(label, label_indices, axis=0)
if cid < 0:
continue
gt_count = label_indices.size
self._insert(cid, np.array([[0, 0]]), gt_count)
def _update(self):
""" update num_inst and sum_metric """
aps = []
for k, v in self.records.items():
recall, prec = self._recall_prec(v, self.counts[k])
ap = self._average_precision(recall, prec)
if self.roc_output_path is not None:
self.save_roc_graph(recall=recall, prec=prec, classkey=k, path=self.roc_output_path, ap=ap)
aps.append(ap)
if self.num is not None and k < (self.num - 1):
self.sum_metric[k] = ap
self.num_inst[k] = 1
if self.num is None:
self.num_inst = 1
self.sum_metric = np.mean(aps)
else:
self.num_inst[-1] = 1
self.sum_metric[-1] = np.mean(aps)
def _recall_prec(self, record, count):
""" get recall and precision from internal records """
record = np.delete(record, np.where(record[:, 1].astype(int) == 0)[0], axis=0)
sorted_records = record[record[:,0].argsort()[::-1]]
tp = np.cumsum(sorted_records[:, 1].astype(int) == 1)
fp = np.cumsum(sorted_records[:, 1].astype(int) == 2)
if count <= 0:
recall = tp * 0.0
else:
recall = tp / float(count)
prec = tp.astype(float) / (tp + fp)
return recall, prec
def _average_precision(self, rec, prec):
"""
calculate average precision
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float
"""
# append sentinel values at both ends
mrec = np.concatenate(([0.], rec, [1.]))
mpre = np.concatenate(([0.], prec, [0.]))
# compute precision integration ladder
for i in range(mpre.size - 1, 0, -1):
mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
# look for recall value changes
i = np.where(mrec[1:] != mrec[:-1])[0]
# sum (\delta recall) * prec
ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
return ap
def _insert(self, key, records, count):
""" Insert records according to key """
if key not in self.records:
assert key not in self.counts
self.records[key] = records
self.counts[key] = count
else:
self.records[key] = np.vstack((self.records[key], records))
assert key in self.counts
self.counts[key] += count
class VOC07MApMetric(MApMetric):
""" Mean average precision metric for PASCAL V0C 07 dataset """
def __init__(self, *args, **kwargs):
super(VOC07MApMetric, self).__init__(*args, **kwargs)
def _average_precision(self, rec, prec):
"""
calculate average precision, override the default one,
special 11-point metric
Params:
----------
rec : numpy.array
cumulated recall
prec : numpy.array
cumulated precision
Returns:
----------
ap as float
"""
ap = 0.
for t in np.arange(0., 1.1, 0.1):
if np.sum(rec >= t) == 0:
p = 0
else:
p = np.max(prec[rec >= t])
ap += p / 11.
return ap
|
{
"content_hash": "1dd55aceea3cefb99e661dfffe2e690c",
"timestamp": "",
"source": "github",
"line_count": 307,
"max_line_length": 107,
"avg_line_length": 37.06188925081433,
"alnum_prop": 0.48242221831604853,
"repo_name": "zhreshold/mxnet-ssd",
"id": "796bee8b8f1183fe7c9743f266cf076188e3c278",
"size": "11378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "evaluate/eval_metric.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "69"
},
{
"name": "Makefile",
"bytes": "308"
},
{
"name": "Python",
"bytes": "267666"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
}
|
import functools
import unittest2
from sentry import app
from sentry.db import get_backend
def with_settings(**settings):
def wrapped(func):
@functools.wraps(func)
def _wrapped(*args, **kwargs):
defaults = {}
for k, v in settings.iteritems():
defaults[k] = app.config.get(k)
app.config[k] = v
try:
return func(*args, **kwargs)
finally:
for k, v in defaults.iteritems():
app.config[k] = v
return _wrapped
return wrapped
class BaseTest(unittest2.TestCase):
def setUp(self):
# XXX: might be a better way to do do this
app.config['DATASTORE'] = {
'ENGINE': 'sentry.db.backends.redis.RedisBackend',
'OPTIONS': {
'db': 9
}
}
app.config['CLIENT'] = 'sentry.client.base.SentryClient'
app.db = get_backend(app)
# Flush the Redis instance
app.db.conn.flushdb()
self.client = app.test_client()
|
{
"content_hash": "d41c188062e22e13410397be22b5d04d",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 64,
"avg_line_length": 28.63157894736842,
"alnum_prop": 0.5220588235294118,
"repo_name": "dcramer/sentry-old",
"id": "638773d4a16ef222861fe146799b95b1a538f7e4",
"size": "1088",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "10085"
},
{
"name": "Python",
"bytes": "183975"
},
{
"name": "Shell",
"bytes": "4106"
}
],
"symlink_target": ""
}
|
import os
import sys
from urlparse import urlparse
import pymongo
from pyramid.paster import (
bootstrap,
setup_logging
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) != 2:
usage(argv)
config_uri = argv[1]
setup_logging(config_uri)
env = bootstrap(config_uri)
settings = env['registry'].settings
mongo_url = urlparse(settings['mongo_uri'])
client = pymongo.MongoClient(mongo_url.hostname, mongo_url.port)
db = client[mongo_url.path[1:]]
db.user.insert({'user':'admin', 'display_name': 'Wayne Witzel III',
'password':'password', 'groups':['admin']})
db.settings.insert({'title':'Blog', 'subtitle':'This is my blog.',
'email':'wayne@pieceofpy.com'})
if __name__ == '__main__':
main()
|
{
"content_hash": "00ba09d1acf6e4b5e587b5a657271a59",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 71,
"avg_line_length": 26.38888888888889,
"alnum_prop": 0.6,
"repo_name": "wwitzel3/pinto",
"id": "0b989982324322dbf322ac8a54375cc16c9445d1",
"size": "950",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pinto/pinto/scripts/create.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "344062"
},
{
"name": "JavaScript",
"bytes": "370946"
},
{
"name": "Python",
"bytes": "13258"
},
{
"name": "Ruby",
"bytes": "927"
}
],
"symlink_target": ""
}
|
class BlurayRating(object):
def __init__(self):
self.__video = None
self.__audio = None
self.__extras = None
self.__link = None
@property
def video(self):
return self.__video
@video.setter
def video(self, video):
self.__video = video
@property
def audio(self):
return self.__audio
@audio.setter
def audio(self, audio):
self.__audio = audio
@property
def extras(self):
return self.__extras
@extras.setter
def extras(self, extras):
self.__extras = extras
@property
def link(self):
return self.__link
@link.setter
def link (self, link):
self.__link = link
def __str__(self):
return "{Video: " + self.video + ", Audio: " + self.audio + ", Extras: " + self.extras + "}"
|
{
"content_hash": "11ca41140e50df7acedd1ce40fba96f3",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 100,
"avg_line_length": 20.73170731707317,
"alnum_prop": 0.5341176470588235,
"repo_name": "jeremyrea/caterblu",
"id": "497b158748e6693fd24a6128605aeab7fd6f2c09",
"size": "850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "source/models/bluray_rating.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1595"
},
{
"name": "HTML",
"bytes": "5233"
},
{
"name": "JavaScript",
"bytes": "1718"
},
{
"name": "Python",
"bytes": "21192"
},
{
"name": "Shell",
"bytes": "226"
}
],
"symlink_target": ""
}
|
import json
import py
import requests
issues_url = "https://api.github.com/repos/pytest-dev/pytest/issues"
def get_issues():
issues = []
url = issues_url
while 1:
get_data = {"state": "all"}
r = requests.get(url, params=get_data)
data = r.json()
if r.status_code == 403:
# API request limit exceeded
print(data["message"])
exit(1)
issues.extend(data)
# Look for next page
links = requests.utils.parse_header_links(r.headers["Link"])
another_page = False
for link in links:
if link["rel"] == "next":
url = link["url"]
another_page = True
if not another_page:
return issues
def main(args):
cachefile = py.path.local(args.cache)
if not cachefile.exists() or args.refresh:
issues = get_issues()
cachefile.write(json.dumps(issues))
else:
issues = json.loads(cachefile.read())
open_issues = [x for x in issues if x["state"] == "open"]
open_issues.sort(key=lambda x: x["number"])
report(open_issues)
def _get_kind(issue):
labels = [l["name"] for l in issue["labels"]]
for key in ("bug", "enhancement", "proposal"):
if key in labels:
return key
return "issue"
def report(issues):
for issue in issues:
title = issue["title"]
# body = issue["body"]
kind = _get_kind(issue)
status = issue["state"]
number = issue["number"]
link = "https://github.com/pytest-dev/pytest/issues/%s/" % number
print("----")
print(status, kind, link)
print(title)
# print()
# lines = body.split("\n")
# print ("\n".join(lines[:3]))
# if len(lines) > 3 or len(body) > 240:
# print ("...")
print("\n\nFound %s open issues" % len(issues))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser("process bitbucket issues")
parser.add_argument(
"--refresh", action="store_true", help="invalidate cache, refresh issues"
)
parser.add_argument(
"--cache", action="store", default="issues.json", help="cache file"
)
args = parser.parse_args()
main(args)
|
{
"content_hash": "459488e2f8be6f1ac26eeab39ef0e695",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 81,
"avg_line_length": 26.870588235294118,
"alnum_prop": 0.5521015761821366,
"repo_name": "txomon/pytest",
"id": "25bfc3e9a0925c5a22b6150919ce8d6551cbddb7",
"size": "2284",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "extra/get_issues.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1293"
},
{
"name": "Python",
"bytes": "1829248"
}
],
"symlink_target": ""
}
|
import collections
import os
from typing import List
from typing import Tuple
from paddle.utils import download
from paddle.dataset.common import DATA_HOME
from .dataset import AudioClassificationDataset
__all__ = []
class TESS(AudioClassificationDataset):
"""
TESS is a set of 200 target words were spoken in the carrier phrase
"Say the word _____' by two actresses (aged 26 and 64 years) and
recordings were made of the set portraying each of seven emotions(anger,
disgust, fear, happiness, pleasant surprise, sadness, and neutral).
There are 2800 stimuli in total.
Reference:
Toronto emotional speech set (TESS) https://tspace.library.utoronto.ca/handle/1807/24487
https://doi.org/10.5683/SP2/E8H2MF
Args:
mode (str, optional): It identifies the dataset mode (train or dev). Defaults to train.
n_folds (int, optional): Split the dataset into n folds. 1 fold for dev dataset and n-1 for train dataset. Defaults to 5.
split (int, optional): It specify the fold of dev dataset. Defaults to 1.
feat_type (str, optional): It identifies the feature type that user wants to extrace of an audio file. Defaults to raw.
archive(dict): it tells where to download the audio archive. Defaults to None.
Returns:
:ref:`api_paddle_io_Dataset`. An instance of TESS dataset.
Examples:
.. code-block:: python
import paddle
mode = 'dev'
tess_dataset = paddle.audio.datasets.TESS(mode=mode,
feat_type='raw')
for idx in range(5):
audio, label = tess_dataset[idx]
# do something with audio, label
print(audio.shape, label)
# [audio_data_length] , label_id
tess_dataset = paddle.audio.datasets.TESS(mode=mode,
feat_type='mfcc',
n_mfcc=40)
for idx in range(5):
audio, label = tess_dataset[idx]
# do something with mfcc feature, label
print(audio.shape, label)
# [feature_dim, num_frames] , label_id
"""
archive = {
'url': 'https://bj.bcebos.com/paddleaudio/datasets/TESS_Toronto_emotional_speech_set.zip',
'md5': '1465311b24d1de704c4c63e4ccc470c7',
}
label_list = [
'angry',
'disgust',
'fear',
'happy',
'neutral',
'ps', # pleasant surprise
'sad',
]
meta_info = collections.namedtuple(
'META_INFO', ('speaker', 'word', 'emotion')
)
audio_path = 'TESS_Toronto_emotional_speech_set'
def __init__(
self,
mode: str = 'train',
n_folds: int = 5,
split: int = 1,
feat_type: str = 'raw',
archive=None,
**kwargs,
):
assert isinstance(n_folds, int) and (
n_folds >= 1
), f'the n_folds should be integer and n_folds >= 1, but got {n_folds}'
assert split in range(
1, n_folds + 1
), f'The selected split should be integer and should be 1 <= split <= {n_folds}, but got {split}'
if archive is not None:
self.archive = archive
files, labels = self._get_data(mode, n_folds, split)
super().__init__(
files=files, labels=labels, feat_type=feat_type, **kwargs
)
def _get_meta_info(self, files) -> List[collections.namedtuple]:
ret = []
for file in files:
basename_without_extend = os.path.basename(file)[:-4]
ret.append(self.meta_info(*basename_without_extend.split('_')))
return ret
def _get_data(
self, mode: str, n_folds: int, split: int
) -> Tuple[List[str], List[int]]:
if not os.path.isdir(os.path.join(DATA_HOME, self.audio_path)):
download.get_path_from_url(
self.archive['url'],
DATA_HOME,
self.archive['md5'],
decompress=True,
)
wav_files = []
for root, _, files in os.walk(os.path.join(DATA_HOME, self.audio_path)):
for file in files:
if file.endswith('.wav'):
wav_files.append(os.path.join(root, file))
meta_info = self._get_meta_info(wav_files)
files = []
labels = []
for idx, sample in enumerate(meta_info):
_, _, emotion = sample
target = self.label_list.index(emotion)
fold = idx % n_folds + 1
if mode == 'train' and int(fold) != split:
files.append(wav_files[idx])
labels.append(target)
if mode != 'train' and int(fold) == split:
files.append(wav_files[idx])
labels.append(target)
return files, labels
|
{
"content_hash": "ac48531bef5d113fea9b907c263f5311",
"timestamp": "",
"source": "github",
"line_count": 142,
"max_line_length": 128,
"avg_line_length": 34.82394366197183,
"alnum_prop": 0.5518705763397371,
"repo_name": "luotao1/Paddle",
"id": "46ee1425ec9fb30376c1678e8bc188328bf3711d",
"size": "5555",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "python/paddle/audio/datasets/tess.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "58544"
},
{
"name": "C",
"bytes": "210300"
},
{
"name": "C++",
"bytes": "36771446"
},
{
"name": "CMake",
"bytes": "903079"
},
{
"name": "Cuda",
"bytes": "5200715"
},
{
"name": "Dockerfile",
"bytes": "4361"
},
{
"name": "Go",
"bytes": "49796"
},
{
"name": "Java",
"bytes": "16630"
},
{
"name": "Jinja",
"bytes": "23852"
},
{
"name": "MLIR",
"bytes": "39982"
},
{
"name": "Python",
"bytes": "36248258"
},
{
"name": "R",
"bytes": "1332"
},
{
"name": "Shell",
"bytes": "553175"
}
],
"symlink_target": ""
}
|
from math import log2
from yace.util.math import is_power_of_two
class AddressDecoder:
"""Maps components into equally sized blocks in address space"""
def __init__(self, address_bits, masked_bits):
"""
Create a new decoder instance.
:param address_bits: the number of bits in this address space
:param masked_bits: the number of most significant bits reserved for decoding
"""
assert address_bits > 0, "number of bits must be non-negative"
assert masked_bits > 0, "number of bits must be non-negative"
assert masked_bits <= address_bits, "can't mask more than address bits"
self.address_bits = address_bits
self.masked_bits = masked_bits
self.mask = (2 ** self.address_bits) - 1 >> self.masked_bits
self.blocks = [None] * (2 ** self.masked_bits)
def map(self, component, block):
"""
Map a component to a block of this decoder
:param component: the component to map
:param block: the block to map the component to
:return:
"""
assert block < len(self.blocks), "block not mapped: {}".format(block)
assert self.blocks[block] is None, "block already mapped"
self.blocks[block] = component
def write(self, address, value):
"""
Write a byte into mapped memory
:param address: the address to write to
:param value: the value to write
"""
block = self._get_block(address)
self.blocks[block].write(address & self.mask, value)
def read(self, address):
"""
Read a byte from mapped memory
:param address: the address to read from
:return: the read byte
"""
block = self._get_block(address)
return self.blocks[block].read(address & self.mask)
def _get_block(self, address):
return address >> (self.address_bits - self.masked_bits)
|
{
"content_hash": "ed05e8616ed8ff2820ad8888730ac417",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 85,
"avg_line_length": 36.56603773584906,
"alnum_prop": 0.6171310629514963,
"repo_name": "tobier/yace",
"id": "b9f5ddaa35b19622a16dbf1f219a89505809c6c8",
"size": "3049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "yace/emulation/generic/decoder.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21049"
}
],
"symlink_target": ""
}
|
class EmitterCallbacks(object):
def __init__(self, emitter):
self._emitter = emitter
def emitter(self, data):
self._emitter(data)
##############
### Runner ###
##############
'''
Called when task in playbook fails.
'''
def on_failed(self, host, res, ignore_errors=False):
return_dict = {"event": "failed", "host": host, "res": res, "ignore_error": ignore_errors}
self.emitter(return_dict)
'''
Called when task in playbook is successful.
'''
def on_ok(self, host, res):
return_dict = {"event": "ok", "host": host, "res": res}
self.emitter(return_dict)
'''
Called when task in playbook is skipped due to some flag being set telling the task not to run hence it is skipped.
**NOTE** This does NOT signify an error simple the task has not been performed.
'''
def on_skipped(self, host, item=None):
return_dict = {"event": "skipped", "host": host, "item": item}
self.emitter(return_dict)
'''
Called when the host of the task that it is attemping to run on is not available.
'''
def on_unreachable(self, host, res):
return_dict = {"event": "unreachable", "host": host, "res": res}
self.emitter(return_dict)
'''
Called when there are no hosts left to perform the actions on.
**NOTE** This DOES signify an error.
'''
def on_no_hosts(self):
return_dict = {"event": "no_hosts"}
self.emitter(return_dict)
'''
TODO.
'''
def on_async_poll(self, host, res, jid, clock):
pass
'''
TODO.
'''
def on_async_ok(self, host, res, jid):
pass
'''
TODO.
'''
def on_async_failed(self, host, res, jid):
pass
################
### PlayBook ###
################
'''
Called when the running of the playbook begins.
'''
def on_start(self):
return_dict = {"event": "start"}
self.emitter(return_dict)
'''
TODO.
'''
def on_notify(self, host, handler):
return_dict = {"event": "notify", "host": host, "handler": handler}
self.emitter(return_dict)
'''
TODO.
'''
def on_no_hosts_matched(self):
return_dict = {"event": "no_hosts_matched"}
self.emitter(return_dict)
'''
TODO.
'''
def on_no_hosts_remaining(self):
return_dict = {"event": "no_hosts_remaining"}
self.emitter(return_dict)
'''
TODO.
'''
def on_task_start(self, name, is_conditional):
return_dict = {"event": "ok", "name": name, "conditional": is_conditional}
self.emitter(return_dict)
'''
Called when the user is meant to be prompted for an input.
**NOTE** This will NOT be used in our code since we are doing the playbook calls from the website the user won't have a chance to input things.
'''
def on_vars_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
return_dict = {"event": "prompt", "varname": varname, "private": private, "prompt": None, "encrypt": None, "confirm": confirm, "salt_size": salt_size, "salt": salt, "default": default}
self.emitter(return_dict)
'''
TODO.
'''
def on_setup(self):
return_dict = {"event": "setup"}
self.emitter(return_dict)
'''
TODO.
'''
def on_import_for_host(self, host, imported_file):
return_dict = {"event": "import_for_host", "host": host, "imported_file": imported_file}
self.emitter(return_dict)
'''
TODO.
'''
def on_not_import_for_host(self, host, missing_file):
return_dict = {"event": "not_import_for_host", "host": host, "missing_file": missing_file}
self.emitter(return_dict)
'''
TODO.
'''
def on_play_start(self, name):
return_dict = {"event": "play_start", "name": name}
self.emitter(return_dict)
'''
TODO.
'''
def on_stats(self, stats):
return_dict = {"event": "stats", "stats": stats}
self.emitter(return_dict)
'''
A custom callback, added for usage within tachyon and /not/ called by the
Ansible API. This callback is called after all attempts at running have
ceased.
'''
def on_complete(self):
return_dict = {"event": "complete"}
self.emitter(return_dict)
|
{
"content_hash": "e020751bbc022d4b1fe7f28785411723",
"timestamp": "",
"source": "github",
"line_count": 162,
"max_line_length": 192,
"avg_line_length": 27.271604938271604,
"alnum_prop": 0.5665459483929379,
"repo_name": "RoboPython/neontower",
"id": "abf9b4da79d6792c4dfd558e9528fae6b21d0132",
"size": "4418",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tachyon/emitter_callbacks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "364"
},
{
"name": "HTML",
"bytes": "182"
},
{
"name": "Python",
"bytes": "16643"
}
],
"symlink_target": ""
}
|
"""mysql_float_to_timestamp
Revision ID: 5c4f93e5bb4
Revises: 7e6f9d542f8b
Create Date: 2016-07-25 15:36:36.469847
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import func
from gnocchi.indexer import sqlalchemy_base
# revision identifiers, used by Alembic.
revision = '5c4f93e5bb4'
down_revision = '27d2a1d205ff'
branch_labels = None
depends_on = None
def upgrade():
bind = op.get_bind()
if bind and bind.engine.name == "mysql":
op.execute("SET time_zone = '+00:00'")
# NOTE(jd) So that crappy engine that is MySQL does not have "ALTER
# TABLE … USING …". We need to copy everything and convert…
for table_name, column_name in (("resource", "started_at"),
("resource", "ended_at"),
("resource", "revision_start"),
("resource_history", "started_at"),
("resource_history", "ended_at"),
("resource_history", "revision_start"),
("resource_history", "revision_end"),
("resource_type", "updated_at")):
nullable = column_name == "ended_at"
existing_type = sa.types.DECIMAL(
precision=20, scale=6, asdecimal=True)
existing_col = sa.Column(
column_name,
existing_type,
nullable=nullable)
temp_col = sa.Column(
column_name + "_ts",
sqlalchemy_base.TimestampUTC(),
nullable=True)
op.add_column(table_name, temp_col)
t = sa.sql.table(table_name, existing_col, temp_col)
op.execute(t.update().values(
**{column_name + "_ts": func.from_unixtime(existing_col)}))
op.drop_column(table_name, column_name)
op.alter_column(table_name,
column_name + "_ts",
nullable=nullable,
type_=sqlalchemy_base.TimestampUTC(),
existing_nullable=nullable,
existing_type=existing_type,
new_column_name=column_name)
|
{
"content_hash": "b1d7b6e552dcc4e1f58a67379015f5a9",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 79,
"avg_line_length": 38.983333333333334,
"alnum_prop": 0.5027789653698161,
"repo_name": "leandroreox/gnocchi",
"id": "824a3e93a516353247a20b3526bca89a245dd05a",
"size": "2988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gnocchi/indexer/alembic/versions/5c4f93e5bb4_mysql_float_to_timestamp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "1064"
},
{
"name": "Python",
"bytes": "807668"
},
{
"name": "Shell",
"bytes": "24197"
}
],
"symlink_target": ""
}
|
import abc
import weakref
from datetime import datetime
class TaskDetail(object):
"""Task details have the bare minimum of these fields/methods."""
def __init__(self, name, metadata=None):
self.date_created = datetime.utcnow()
self.name = name
self.metadata = metadata
self.date_updated = None
def __str__(self):
return "TaskDetail (%s, %s): %s" % (self.name, self.date_created,
self.metadata)
class FlowDetail(object):
"""Flow details have the bare minimum of these fields/methods."""
__metaclass__ = abc.ABCMeta
def __init__(self, book, name):
self.book = weakref.proxy(book)
self.name = name
@abc.abstractmethod
def __iter__(self):
"""Iterates over all task details.
The order will be in the same order that they were added."""
raise NotImplementedError()
@abc.abstractmethod
def __contains__(self, task_name):
"""Determines if any task details with the given name exists in this
flow details."""
raise NotImplementedError()
@abc.abstractmethod
def __getitem__(self, task_name):
"""Fetch any task details that match the given task name."""
raise NotImplementedError()
@abc.abstractmethod
def add_task(self, task_name, metadata=None):
"""Atomically creates a new task detail entry to this flows details and
returns it for further use."""
raise NotImplementedError()
@abc.abstractmethod
def __delitem__(self, task_name):
"""Deletes any task details that match the given task name."""
raise NotImplementedError()
@abc.abstractmethod
def __len__(self):
"""Returns how many task details objects the flow contains."""
raise NotImplementedError()
def __str__(self):
return "FlowDetail (%s): %s entries" % (self.name, len(self))
class LogBook(object):
"""Base class for what a logbook should provide"""
__metaclass__ = abc.ABCMeta
@abc.abstractmethod
def add_flow(self, flow_name):
"""Atomically adds and returns a new flow details object to the given
logbook or raises an exception if that flow (or a flow with that name)
already exists.
"""
raise NotImplementedError()
@abc.abstractmethod
def __getitem__(self, flow_name):
"""Fetches the given flow details object for the given flow
name or raises an exception if that flow name does not exist."""
raise NotImplementedError()
@abc.abstractmethod
def __contains__(self, flow_name):
"""Determines if a flow details object with the given flow name
exists in this logbook."""
raise NotImplementedError()
@abc.abstractmethod
def __delitem__(self, flow_name):
"""Removes the given flow details object that matches the provided
flow name or raises an exception if that flow name does not
exist."""
raise NotImplementedError()
@abc.abstractmethod
def __iter__(self):
"""Iterates over all the contained flow details.
The order will be in the same order that they were added."""
raise NotImplementedError()
@abc.abstractmethod
def __len__(self):
"""Returns how many flow details the logbook contains."""
raise NotImplementedError()
def close(self):
"""Allows the logbook to free any resources that it has."""
pass
|
{
"content_hash": "b2943683589715d7d20fa77a2d89a751",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 79,
"avg_line_length": 30.86842105263158,
"alnum_prop": 0.6288718385905087,
"repo_name": "JohnGarbutt/taskflow-1",
"id": "943896e8ff981d630945466b92101711cf267a28",
"size": "4221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskflow/logbook.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
from django.db import models
class Pais(models.Model):
nome = models.CharField(max_length=100, unique=True)
sigla = models.CharField(max_length=2, unique=True)
class Meta:
verbose_name = 'País'
verbose_name_plural = 'Países'
def __unicode__(self):
return '%s' % self.sigla
class Uf(models.Model):
nome = models.CharField(max_length=100, unique=True)
sigla = models.CharField(max_length=2, unique=True)
pais = models.ForeignKey(Pais, verbose_name='País')
class Meta:
verbose_name = 'Uf'
verbose_name_plural = 'Ufs'
def __unicode__(self):
return '%s' % self.sigla
class Cidade(models.Model):
nome = models.CharField(max_length=100)
uf = models.ForeignKey(Uf)
def __unicode__(self):
return '%s' % self.nome
class Endereco(models.Model):
logradouro = models.CharField(max_length=255)
complemento = models.CharField(max_length=255, null=True, blank=True)
bairro = models.CharField(max_length=100)
cidade = models.ForeignKey(Cidade)
cep = models.CharField(max_length=9, blank=True)
class Meta:
verbose_name = 'Endereço'
verbose_name_plural = 'Endereços'
def __unicode__(self):
return '%s %s %s %s %s' % (self.logradouro, self.complemento, self.bairro, self.cidade, self.cep)
|
{
"content_hash": "d7ddcefed0a36ecf6ffd3970b6df223a",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 105,
"avg_line_length": 27.895833333333332,
"alnum_prop": 0.6445108289768484,
"repo_name": "pydawan/protetores_bucais",
"id": "54133cb6e0f89ab41fa1acd4c4be5ef14025c026",
"size": "1368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "protetores_bucais/apps/localizacao/models.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "353386"
},
{
"name": "HTML",
"bytes": "110808"
},
{
"name": "JavaScript",
"bytes": "661585"
},
{
"name": "Python",
"bytes": "43256"
}
],
"symlink_target": ""
}
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.importlib import import_module
__version__ = (0, 0, 1)
try:
conf = settings.PAUTH
except AttributeError:
raise ImproperlyConfigured("django-pauth requires configuration.")
prefer = conf.get('check_first', 'internal')
internal = {}
external = {}
internal = dict(map(lambda x: (x.name, x),
[import_module(p).provider for p in conf['providers'].get('internal', [])]))
external = dict(map(lambda x: (x.name, x),
[import_module(p).provider for p in conf['providers'].get('external', [])]))
import pdb; pdb.set_trace()
loaded_providers = internal.copy()
loaded_providers.update(external)
[lp.configure(conf['vendor']) for lp in loaded_providers.values()]
|
{
"content_hash": "9a34d808fd91543b54cbabe2ef494478",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 109,
"avg_line_length": 30.85185185185185,
"alnum_prop": 0.6686674669867947,
"repo_name": "spuriousdata/django-pauth",
"id": "b1a4ba945c66713d8a1dcd88f66a541f3ed1ced8",
"size": "833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pauth/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22980"
}
],
"symlink_target": ""
}
|
from tempest.api.volume import base
from tempest import config
from tempest.openstack.common import log as logging
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class ExtensionsV2TestJSON(base.BaseVolumeTest):
@test.attr(type='gate')
def test_list_extensions(self):
# List of all extensions
_, extensions = self.volumes_extension_client.list_extensions()
if len(CONF.volume_feature_enabled.api_extensions) == 0:
raise self.skipException('There are not any extensions configured')
extension_list = [extension.get('alias') for extension in extensions]
LOG.debug("Cinder extensions: %s" % ','.join(extension_list))
ext = CONF.volume_feature_enabled.api_extensions[0]
if ext == 'all':
self.assertIn('Hosts', map(lambda x: x['name'], extensions))
elif ext:
self.assertIn(ext, map(lambda x: x['name'], extensions))
else:
raise self.skipException('There are not any extensions configured')
class ExtensionsV2TestXML(ExtensionsV2TestJSON):
_interface = 'xml'
class ExtensionsV1TestJSON(ExtensionsV2TestJSON):
_api_version = 1
class ExtensionsV1TestXML(ExtensionsV1TestJSON):
_interface = 'xml'
|
{
"content_hash": "04b7d1c2e4ce0f5a249a69765f26868a",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 79,
"avg_line_length": 31.75,
"alnum_prop": 0.684251968503937,
"repo_name": "queria/my-tempest",
"id": "4fc6ee40f4b77df96148a6eda9d4cead9c1acd99",
"size": "1896",
"binary": false,
"copies": "3",
"ref": "refs/heads/juno",
"path": "tempest/api/volume/test_extensions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3392805"
},
{
"name": "Shell",
"bytes": "8663"
}
],
"symlink_target": ""
}
|
from compose.config.errors import DependencyError
from compose.config.sort_services import sort_service_dicts
from compose.config.types import VolumeFromSpec
from tests import unittest
class SortServiceTest(unittest.TestCase):
def test_sort_service_dicts_1(self):
services = [
{
'links': ['redis'],
'name': 'web'
},
{
'name': 'grunt'
},
{
'name': 'redis'
}
]
sorted_services = sort_service_dicts(services)
self.assertEqual(len(sorted_services), 3)
self.assertEqual(sorted_services[0]['name'], 'grunt')
self.assertEqual(sorted_services[1]['name'], 'redis')
self.assertEqual(sorted_services[2]['name'], 'web')
def test_sort_service_dicts_2(self):
services = [
{
'links': ['redis', 'postgres'],
'name': 'web'
},
{
'name': 'postgres',
'links': ['redis']
},
{
'name': 'redis'
}
]
sorted_services = sort_service_dicts(services)
self.assertEqual(len(sorted_services), 3)
self.assertEqual(sorted_services[0]['name'], 'redis')
self.assertEqual(sorted_services[1]['name'], 'postgres')
self.assertEqual(sorted_services[2]['name'], 'web')
def test_sort_service_dicts_3(self):
services = [
{
'name': 'child'
},
{
'name': 'parent',
'links': ['child']
},
{
'links': ['parent'],
'name': 'grandparent'
},
]
sorted_services = sort_service_dicts(services)
self.assertEqual(len(sorted_services), 3)
self.assertEqual(sorted_services[0]['name'], 'child')
self.assertEqual(sorted_services[1]['name'], 'parent')
self.assertEqual(sorted_services[2]['name'], 'grandparent')
def test_sort_service_dicts_4(self):
services = [
{
'name': 'child'
},
{
'name': 'parent',
'volumes_from': [VolumeFromSpec('child', 'rw')]
},
{
'links': ['parent'],
'name': 'grandparent'
},
]
sorted_services = sort_service_dicts(services)
self.assertEqual(len(sorted_services), 3)
self.assertEqual(sorted_services[0]['name'], 'child')
self.assertEqual(sorted_services[1]['name'], 'parent')
self.assertEqual(sorted_services[2]['name'], 'grandparent')
def test_sort_service_dicts_5(self):
services = [
{
'links': ['parent'],
'name': 'grandparent'
},
{
'name': 'parent',
'net': 'container:child'
},
{
'name': 'child'
}
]
sorted_services = sort_service_dicts(services)
self.assertEqual(len(sorted_services), 3)
self.assertEqual(sorted_services[0]['name'], 'child')
self.assertEqual(sorted_services[1]['name'], 'parent')
self.assertEqual(sorted_services[2]['name'], 'grandparent')
def test_sort_service_dicts_6(self):
services = [
{
'links': ['parent'],
'name': 'grandparent'
},
{
'name': 'parent',
'volumes_from': [VolumeFromSpec('child', 'ro')]
},
{
'name': 'child'
}
]
sorted_services = sort_service_dicts(services)
self.assertEqual(len(sorted_services), 3)
self.assertEqual(sorted_services[0]['name'], 'child')
self.assertEqual(sorted_services[1]['name'], 'parent')
self.assertEqual(sorted_services[2]['name'], 'grandparent')
def test_sort_service_dicts_7(self):
services = [
{
'net': 'container:three',
'name': 'four'
},
{
'links': ['two'],
'name': 'three'
},
{
'name': 'two',
'volumes_from': [VolumeFromSpec('one', 'rw')]
},
{
'name': 'one'
}
]
sorted_services = sort_service_dicts(services)
self.assertEqual(len(sorted_services), 4)
self.assertEqual(sorted_services[0]['name'], 'one')
self.assertEqual(sorted_services[1]['name'], 'two')
self.assertEqual(sorted_services[2]['name'], 'three')
self.assertEqual(sorted_services[3]['name'], 'four')
def test_sort_service_dicts_circular_imports(self):
services = [
{
'links': ['redis'],
'name': 'web'
},
{
'name': 'redis',
'links': ['web']
},
]
try:
sort_service_dicts(services)
except DependencyError as e:
self.assertIn('redis', e.msg)
self.assertIn('web', e.msg)
else:
self.fail('Should have thrown an DependencyError')
def test_sort_service_dicts_circular_imports_2(self):
services = [
{
'links': ['postgres', 'redis'],
'name': 'web'
},
{
'name': 'redis',
'links': ['web']
},
{
'name': 'postgres'
}
]
try:
sort_service_dicts(services)
except DependencyError as e:
self.assertIn('redis', e.msg)
self.assertIn('web', e.msg)
else:
self.fail('Should have thrown an DependencyError')
def test_sort_service_dicts_circular_imports_3(self):
services = [
{
'links': ['b'],
'name': 'a'
},
{
'name': 'b',
'links': ['c']
},
{
'name': 'c',
'links': ['a']
}
]
try:
sort_service_dicts(services)
except DependencyError as e:
self.assertIn('a', e.msg)
self.assertIn('b', e.msg)
else:
self.fail('Should have thrown an DependencyError')
def test_sort_service_dicts_self_imports(self):
services = [
{
'links': ['web'],
'name': 'web'
},
]
try:
sort_service_dicts(services)
except DependencyError as e:
self.assertIn('web', e.msg)
else:
self.fail('Should have thrown an DependencyError')
|
{
"content_hash": "3a5e8010170992ef1257923b516f4d81",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 67,
"avg_line_length": 29.13389121338912,
"alnum_prop": 0.4486571879936809,
"repo_name": "TomasTomecek/compose",
"id": "8d0c3ae4080c101b3ffa0760598a96f9e82adfd0",
"size": "6963",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "tests/unit/config/sort_services_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "2606"
},
{
"name": "Python",
"bytes": "397292"
},
{
"name": "Shell",
"bytes": "24220"
}
],
"symlink_target": ""
}
|
from astropy.version import version as astropy_version
if astropy_version < '3.0':
# With older versions of Astropy, we actually need to import the pytest
# plugins themselves in order to make them discoverable by pytest.
from astropy.tests.pytest_plugins import *
else:
# As of Astropy 3.0, the pytest plugins provided by Astropy are
# automatically made available when Astropy is installed. This means it's
# not necessary to import them here, but we still need to import global
# variables that are used for configuration.
from astropy.tests.plugins.display import PYTEST_HEADER_MODULES, TESTED_VERSIONS
from astropy.tests.helper import enable_deprecations_as_exceptions
## Uncomment the following line to treat all DeprecationWarnings as
## exceptions. For Astropy v2.0 or later, there are 2 additional keywords,
## as follow (although default should work for most cases).
## To ignore some packages that produce deprecation warnings on import
## (in addition to 'compiler', 'scipy', 'pygments', 'ipykernel', and
## 'setuptools'), add:
## modules_to_ignore_on_import=['module_1', 'module_2']
## To ignore some specific deprecation warning messages for Python version
## MAJOR.MINOR or later, add:
## warnings_to_ignore_by_pyver={(MAJOR, MINOR): ['Message to ignore']}
# enable_deprecations_as_exceptions()
## Uncomment and customize the following lines to add/remove entries from
## the list of packages for which version numbers are displayed when running
## the tests. Making it pass for KeyError is essential in some cases when
## the package uses other astropy affiliated packages.
# try:
# PYTEST_HEADER_MODULES['Astropy'] = 'astropy'
# PYTEST_HEADER_MODULES['scikit-image'] = 'skimage'
# del PYTEST_HEADER_MODULES['h5py']
# except (NameError, KeyError): # NameError is needed to support Astropy < 1.0
# pass
## Uncomment the following lines to display the version number of the
## package rather than the version number of Astropy in the top line when
## running the tests.
# import os
#
## This is to figure out the package version, rather than
## using Astropy's
# try:
# from .version import version
# except ImportError:
# version = 'dev'
#
# try:
# packagename = os.path.basename(os.path.dirname(__file__))
# TESTED_VERSIONS[packagename] = version
# except NameError: # Needed to support Astropy <= 1.0.0
# pass
|
{
"content_hash": "8d21bc59dcde5a1e87739a6d257f970f",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 84,
"avg_line_length": 44.629629629629626,
"alnum_prop": 0.7331950207468879,
"repo_name": "crawfordsm/pyspectrograph",
"id": "ebab8a1b9be21080a438a2636dab4e9f4462939b",
"size": "2513",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "PySpectrograph/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "226914"
},
{
"name": "Ruby",
"bytes": "490883"
}
],
"symlink_target": ""
}
|
import os
from django.db import models
from django.db.models import Q
from django.contrib.contenttypes.fields import GenericRelation
from django.core.exceptions import PermissionDenied
# from geoposition.fields import GeopositionField
from core.models import PlCoreBase,PlCoreBaseManager,PlCoreBaseDeletionManager,ModelLink
from core.models import Tag
from core.models.plcorebase import StrippedCharField
from core.acl import AccessControlList
from xos.config import Config
config = Config()
class ControllerLinkDeletionManager(PlCoreBaseDeletionManager):
def get_queryset(self):
parent=super(ControllerLinkDeletionManager, self)
try:
backend_type = config.observer_backend_type
except AttributeError:
backend_type = None
parent_queryset = parent.get_queryset() if hasattr(parent, "get_queryset") else parent.get_query_set()
if (backend_type):
return parent_queryset.filter(Q(controller__backend_type=backend_type))
else:
return parent_queryset
# deprecated in django 1.7 in favor of get_queryset().
def get_query_set(self):
return self.get_queryset()
class ControllerDeletionManager(PlCoreBaseDeletionManager):
def get_queryset(self):
parent=super(ControllerDeletionManager, self)
try:
backend_type = config.observer_backend_type
except AttributeError:
backend_type = None
parent_queryset = parent.get_queryset() if hasattr(parent, "get_queryset") else parent.get_query_set()
if backend_type:
return parent_queryset.filter(Q(backend_type=backend_type))
else:
return parent_queryset
# deprecated in django 1.7 in favor of get_queryset().
def get_query_set(self):
return self.get_queryset()
class ControllerLinkManager(PlCoreBaseManager):
def get_queryset(self):
parent=super(ControllerLinkManager, self)
try:
backend_type = config.observer_backend_type
except AttributeError:
backend_type = None
parent_queryset = parent.get_queryset() if hasattr(parent, "get_queryset") else parent.get_query_set()
if backend_type:
return parent_queryset.filter(Q(controller__backend_type=backend_type))
else:
return parent_queryset
# deprecated in django 1.7 in favor of get_queryset().
def get_query_set(self):
return self.get_queryset()
class ControllerManager(PlCoreBaseManager):
def get_queryset(self):
parent=super(ControllerManager, self)
try:
backend_type = config.observer_backend_type
except AttributeError:
backend_type = None
parent_queryset = parent.get_queryset() if hasattr(parent, "get_queryset") else parent.get_query_set()
if backend_type:
return parent_queryset.filter(Q(backend_type=backend_type))
else:
return parent_queryset
# deprecated in django 1.7 in favor of get_queryset().
def get_query_set(self):
return self.get_queryset()
class Site(PlCoreBase):
"""
A logical grouping of Nodes that are co-located at the same geographic location, which also typically corresponds to the Nodes' location in the physical network.
"""
name = StrippedCharField(max_length=200, help_text="Name for this Site")
site_url = models.URLField(null=True, blank=True, max_length=512, help_text="Site's Home URL Page")
enabled = models.BooleanField(default=True, help_text="Status for this Site")
hosts_nodes = models.BooleanField(default=True, help_text="Indicates whether or not the site host nodes")
hosts_users = models.BooleanField(default=True, help_text="Indicates whether or not the site manages user accounts")
# location = GeopositionField()
longitude = models.FloatField(null=True, blank=True)
latitude = models.FloatField(null=True, blank=True)
login_base = StrippedCharField(max_length=50, unique=True, help_text="Prefix for Slices associated with this Site")
is_public = models.BooleanField(default=True, help_text="Indicates the visibility of this site to other members")
abbreviated_name = StrippedCharField(max_length=80)
#deployments = models.ManyToManyField('Deployment', blank=True, related_name='sites')
deployments = models.ManyToManyField('Deployment', through='SiteDeployment', blank=True, help_text="Select which sites are allowed to host nodes in this deployment", related_name='sites')
tags = GenericRelation(Tag)
def __unicode__(self): return u'%s' % (self.name)
def can_update(self, user):
return user.can_update_site(self, allow=['pi'])
class SiteRole(PlCoreBase):
ROLE_CHOICES = (('admin','Admin'),('pi','PI'),('tech','Tech'),('billing','Billing'))
role = StrippedCharField(choices=ROLE_CHOICES, unique=True, max_length=30)
def __unicode__(self): return u'%s' % (self.role)
class SitePrivilege(PlCoreBase):
user = models.ForeignKey('User', related_name='siteprivileges')
site = models.ForeignKey('Site', related_name='siteprivileges')
role = models.ForeignKey('SiteRole',related_name='siteprivileges')
xos_links = [ModelLink(Site,'site'),ModelLink('User','user'),ModelLink('Role','role')]
def __unicode__(self): return u'%s %s %s' % (self.site, self.user, self.role)
def save(self, *args, **kwds):
if not self.user.is_active:
raise PermissionDenied, "Cannot modify role(s) of a disabled user"
super(SitePrivilege, self).save(*args, **kwds)
def delete(self, *args, **kwds):
super(SitePrivilege, self).delete(*args, **kwds)
def can_update(self, user):
return user.can_update_site(self, allow=['pi'])
@staticmethod
def select_by_user(user):
if user.is_admin:
qs = SitePrivilege.objects.all()
else:
sp_ids = [sp.id for sp in SitePrivilege.objects.filter(user=user)]
qs = SitePrivilege.objects.filter(id__in=sp_ids)
return qs
class Deployment(PlCoreBase):
#objects = Controllermanager()
#deleted_objects = DeploymentDeletionManager()
name = StrippedCharField(max_length=200, unique=True, help_text="Name of the Deployment")
#admin_user = StrippedCharField(max_length=200, null=True, blank=True, help_text="Username of an admin user at this deployment")
#admin_password = StrippedCharField(max_length=200, null=True, blank=True, help_text="Password of theadmin user at this deployment")
#admin_tenant = StrippedCharField(max_length=200, null=True, blank=True, help_text="Name of the tenant the admin user belongs to")
#auth_url = StrippedCharField(max_length=200, null=True, blank=True, help_text="Auth url for the deployment")
#backend_type = StrippedCharField(max_length=200, null=True, blank=True, help_text="Type of deployment, e.g. EC2, OpenStack, or OpenStack version")
#availability_zone = StrippedCharField(max_length=200, null=True, blank=True, help_text="OpenStack availability zone")
# smbaker: the default of 'allow all' is intended for evolutions of existing
# deployments. When new deployments are created via the GUI, they are
# given a default of 'allow site <site_of_creator>'
accessControl = models.TextField(max_length=200, blank=False, null=False, default="allow all",
help_text="Access control list that specifies which sites/users may use nodes in this deployment")
def __init__(self, *args, **kwargs):
super(Deployment, self).__init__(*args, **kwargs)
self.no_sync=True
def get_acl(self):
return AccessControlList(self.accessControl)
def test_acl(self, slice=None, user=None):
potential_users=[]
if user:
potential_users.append(user)
if slice:
potential_users.append(slice.creator)
for priv in slice.sliceprivileges.all():
if priv.user not in potential_users:
potential_users.append(priv.user)
acl = self.get_acl()
for user in potential_users:
if acl.test(user) == "allow":
return True
return False
@staticmethod
def select_by_acl(user):
ids = []
for deployment in Deployment.objects.all():
acl = deployment.get_acl()
if acl.test(user) == "allow":
ids.append(deployment.id)
return Deployment.objects.filter(id__in=ids)
def can_update(self, user):
return user.can_update_deployment(self)
def __unicode__(self): return u'%s' % (self.name)
class DeploymentRole(PlCoreBase):
#objects = DeploymentLinkManager()
#deleted_objects = DeploymentLinkDeletionManager()
ROLE_CHOICES = (('admin','Admin'),)
role = StrippedCharField(choices=ROLE_CHOICES, unique=True, max_length=30)
def __unicode__(self): return u'%s' % (self.role)
class DeploymentPrivilege(PlCoreBase):
#objects = DeploymentLinkManager()
#deleted_objects = DeploymentLinkDeletionManager()
user = models.ForeignKey('User', related_name='deploymentprivileges')
deployment = models.ForeignKey('Deployment', related_name='deploymentprivileges')
role = models.ForeignKey('DeploymentRole',related_name='deploymentprivileges')
xos_links = [ModelLink(Deployment,'deployment'),ModelLink('User','user'),ModelLink('Role','role')]
class Meta:
unique_together = ('user', 'deployment', 'role')
def __unicode__(self): return u'%s %s %s' % (self.deployment, self.user, self.role)
def can_update(self, user):
return user.can_update_deployment(self)
@staticmethod
def select_by_user(user):
if user.is_admin:
qs = DeploymentPrivilege.objects.all()
else:
dpriv_ids = [dp.id for dp in DeploymentPrivilege.objects.filter(user=user)]
qs = DeploymentPrivilege.objects.filter(id__in=dpriv_ids)
return qs
class ControllerRole(PlCoreBase):
#objects = ControllerLinkManager()
#deleted_objects = ControllerLinkDeletionManager()
ROLE_CHOICES = (('admin','Admin'),)
role = StrippedCharField(choices=ROLE_CHOICES, unique=True, max_length=30)
def __unicode__(self): return u'%s' % (self.role)
class Controller(PlCoreBase):
objects = ControllerManager()
deleted_objects = ControllerDeletionManager()
name = StrippedCharField(max_length=200, unique=True, help_text="Name of the Controller")
backend_type = StrippedCharField(max_length=200, help_text="Type of compute controller, e.g. EC2, OpenStack, or OpenStack version")
version = StrippedCharField(max_length=200, help_text="Controller version")
auth_url = StrippedCharField(max_length=200, null=True, blank=True, help_text="Auth url for the compute controller")
admin_user = StrippedCharField(max_length=200, null=True, blank=True, help_text="Username of an admin user at this controller")
admin_password = StrippedCharField(max_length=200, null=True, blank=True, help_text="Password of theadmin user at this controller")
admin_tenant = StrippedCharField(max_length=200, null=True, blank=True, help_text="Name of the tenant the admin user belongs to")
domain = StrippedCharField(max_length=200, null=True, blank=True, help_text="Name of the domain this controller belongs to")
rabbit_host = StrippedCharField(max_length=200, null=True, blank=True, help_text="IP address of rabbitmq server at this controller")
rabbit_user = StrippedCharField(max_length=200, null=True, blank=True, help_text="Username of rabbitmq server at this controller")
rabbit_password = StrippedCharField(max_length=200, null=True, blank=True, help_text="Password of rabbitmq server at this controller")
deployment = models.ForeignKey(Deployment,related_name='controllerdeployments')
xos_links = [ModelLink(Deployment, via='deployment')]
def __init__(self, *args, **kwargs):
super(Controller, self).__init__(*args, **kwargs)
self.no_sync=True
def __unicode__(self): return u'%s %s %s' % (self.name, self.backend_type, self.version)
@property
def auth_url_v3(self):
if self.auth_url and self.auth_url[-1] == '/':
return '{}/v3/'.format('/'.join(self.auth_url.split('/')[:-2]))
else:
return '{}/v3/'.format('/'.join(self.auth_url.split('/')[:-1]))
@staticmethod
def select_by_user(user):
if user.is_admin:
qs = Controller.objects.all()
else:
deployments = [dp.deployment for dp in DeploymentPrivilege.objects.filter(user=user, role__role__in=['Admin', 'admin'])]
qs = Controller.objects.filter(deployment__in=deployments)
return qs
class SiteDeployment(PlCoreBase):
objects = ControllerLinkManager()
deleted_objects = ControllerLinkDeletionManager()
site = models.ForeignKey(Site,related_name='sitedeployments')
deployment = models.ForeignKey(Deployment,related_name='sitedeployments')
controller = models.ForeignKey(Controller, null=True, blank=True, related_name='sitedeployments')
availability_zone = StrippedCharField(max_length=200, null=True, blank=True, help_text="OpenStack availability zone")
xos_links = [ModelLink(Site,'site'),ModelLink(Deployment,'deployment'),ModelLink(Controller,'controller')]
class Meta:
unique_together = ('site', 'deployment', 'controller')
def __unicode__(self): return u'%s %s' % (self.deployment, self.site)
class ControllerSite(PlCoreBase):
site = models.ForeignKey(Site,related_name='controllersite')
controller = models.ForeignKey(Controller, null=True, blank=True, related_name='controllersite')
tenant_id = StrippedCharField(null=True, blank=True, max_length=200, db_index=True, help_text="Keystone tenant id")
xos_links = [ModelLink(Controller,via='controller'),ModelLink(Site,via='site')]
def delete(self, *args, **kwds):
super(ControllerSite, self).delete(*args, **kwds)
class Meta:
unique_together = ('site', 'controller')
class Diag(PlCoreBase):
name = StrippedCharField(max_length=200, help_text="Name of the synchronizer")
@property
def enacted(self):
return None
@enacted.setter
def enacted(self, value):
pass # Ignore sets, Diag objects are always pending.
|
{
"content_hash": "c24bf6daf930f665350fcb3e20ab8d56",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 191,
"avg_line_length": 42.26900584795322,
"alnum_prop": 0.6812396236856668,
"repo_name": "zdw/xos",
"id": "069fec99450692c431e66fc38b50d952712af622",
"size": "14456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "xos/core/models/site.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "API Blueprint",
"bytes": "36709"
},
{
"name": "CSS",
"bytes": "673508"
},
{
"name": "HTML",
"bytes": "744333"
},
{
"name": "JavaScript",
"bytes": "1221865"
},
{
"name": "M4",
"bytes": "50678"
},
{
"name": "Makefile",
"bytes": "9039"
},
{
"name": "Python",
"bytes": "1368068"
},
{
"name": "Shell",
"bytes": "31905"
}
],
"symlink_target": ""
}
|
from storlets.sbus.client.client import SBusClient, SBusResponse
__all__ = [
'SBusClient',
'SBusResponse'
]
|
{
"content_hash": "52ad759bb0d3929e7271203484dd7995",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 64,
"avg_line_length": 19.5,
"alnum_prop": 0.6923076923076923,
"repo_name": "openstack/storlets",
"id": "adf9e742043d942b1fc2293926279b09f3d71ecd",
"size": "707",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "storlets/sbus/client/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "31430"
},
{
"name": "Java",
"bytes": "184917"
},
{
"name": "Jupyter Notebook",
"bytes": "7689"
},
{
"name": "Makefile",
"bytes": "347"
},
{
"name": "Python",
"bytes": "579917"
},
{
"name": "Shell",
"bytes": "20127"
}
],
"symlink_target": ""
}
|
"""Model module for MinDiff Keras integration.
This Module provides the implementation of a MinDiffModel, a Model that
delegates its call method to another Model and adds a `min_diff_loss`
during training and optionally during evaluation.
"""
import inspect
import dill
import tensorflow as tf
from tensorflow_model_remediation.common import docs
from tensorflow_model_remediation.min_diff.keras import utils
from tensorflow_model_remediation.min_diff.keras.utils import structure_utils
from tensorflow_model_remediation.min_diff.losses import loss_utils
@tf.keras.utils.register_keras_serializable()
class MinDiffModel(tf.keras.Model):
# pyformat: disable
"""Model that adds one or more loss component(s) to another model during training.
Inherits from: `tf.keras.Model`
Arguments:
original_model: Instance of `tf.keras.Model` that will be trained with the
additional `min_diff_loss`.
loss: `dict` or single element of string(s) (name of loss) or
`min_diff.losses.MinDiffLoss` instance(s) that will be used to calculate
the `min_diff_loss`(es).
loss_weight: `dict` of scalars or single scalar applied to the
`min_diff_loss`(es) before being included in training.
predictions_transform: Optional if the output of `original_model` is a
`tf.Tensor`. Function that transforms the output of `original_model` after
it is called on MinDiff examples. The resulting predictions tensor is
what will be passed in to the `losses.MinDiffLoss`(es).
**kwargs: Named parameters that will be passed directly to the base
class' `__init__` function.
`MinDiffModel` wraps the model passed in, `original_model`, and adds a
component to the loss during training and optionally during evaluation.
### <a id=constructing_mindiffmodel></a>Construction
There are two ways to construct a `MinDiffModel` instance, the first is the
simplest and the most common:
1 - Directly wrap your model with `MinDiffModel`. This is the simplest usage
and is most likely what you will want to use (unless your original model has
some custom implementations that need to be taken into account).
```
import tensorflow as tf
model = tf.keras.Sequential([...])
model = MinDiffModel(model, ...)
```
In this case, all methods other than the ones listed below will use the
default implementations of `tf.keras.Model`.
If you are in this use case, the next section is not relevant to you and you
skip to the section on [usage](#using_mindiffmodel).
2 - Subclassing `MinDiffModel` to integrate custom implementations. This will
likely be needed if the original_model is itself a customized subclass of
`tf.keras.Model`. If that is the case and you want to preserve the custom
implementations, you can create a new custom class that inherits first from
`MinDiffModel` and second from your custom class.
```
import tensorflow as tf
class CustomSequential(tf.keras.Sequential):
def train_step(self, data):
print("In a custom train_step!")
super().train_step(data)
class CustomMinDiffModel(MinDiffModel, CustomSequential):
pass # No additional implementation is required.
model = CustomSequential([...])
model = CustomMinDiffModel(model, ...) # This will use the custom train_step.
```
If you need to customize methods defined by `MinDiffModel`, then you can
create a direct subclass and override whatever is needed.
```
import tensorflow as tf
class CustomMinDiffModel(MinDiffModel):
def unpack_min_diff_data(self, inputs):
print("In a custom MinDiffModel method!")
super().unpack_min_diff_data(inputs)
model = tf.keras.Sequential([...])
model = CustomMinDiffModel(model, ...) # This will use the custom
# unpack_min_diff_data method.
```
### <a id=multiple_applications></a>Multiple Applications of MinDiff
It is possible to apply MinDiff multiple times within a single instance of
`MinDiffModel`. To do so, you can pass in a dictionary of losses where keys
are the names of each MinDiff application and the values are the names or
instances of `losses.MinDiffLoss` that will be applied for each respective
MinDiff application.
Loss weights can be set as either one value that will be used for all
applications or with a dictionary that specifies weights for individual
applications. Weights not specified will default to 1.0.
```
import tensorflow as tf
model = tf.keras.Sequential([...])
model = MinDiffModel(model, loss={
"application1": min_diff.losses.MMDLoss(), # Loss for first application.
"application2": min_diff.losses.MMDLoss() # Loss for second application.
},
loss_weight=2.0) # 2.0 will used as the weight for all applications.
```
A `MinDiffModel` initialized as shown above will expect `min_diff_data` to
have a structure matching that of `loss` (i.e. a dictionary of inputs with
keys matching that of `loss`). See `MinDiffModel.compute_min_diff_loss` for
details.
### <a id=using_mindiffmodel></a>Usage
Once you have created an instance of `MinDiffModel`, it can be used almost
exactly the same way as the model it wraps. The main two exceptions to this
are:
- During training, the inputs must include `min_diff_data`, see
`MinDiffModel.compute_min_diff_loss` for details.
- Saving and loading a model can have slightly different behavior if you are
subclassing `MinDiffModel`. See `MinDiffModel.save` and
`MinDiffModel.save_original_model` for details.
Optionally, inputs containing `min_diff_data` can be passed in to `evaluate`
and `predict`. For the former, this will result in the `min_diff_loss`
appearing in the metrics. For `predict` this should have no visible effect.
"""
# pyformat: enable
def __init__(self,
original_model: tf.keras.Model,
loss,
loss_weight=1.0,
predictions_transform=None,
**kwargs):
"""Initializes a MinDiffModel instance.
Raises:
ValueError: If `predictions_transform` is passed in but not callable.
"""
# Roundabout way of accessing the Functional class.
functional_class = tf.keras.Sequential.__bases__[0]
# We need to handle a special case where a custom MinDiffModel class is
# created that is also a subclass of the Functional class. In this case, we
# need to make sure that args match what the Functional.__init__ requires
# (i.e. `inputs` and `outputs` args) and that the rest of the
# Functional.__init__ method is skipped (supported by passing in
# `skip_init=True`).
# This requires any __init__ methods to not do input validation and to
# pass through `skip_init`.
if (isinstance(self, functional_class) and
not isinstance(self, tf.keras.Sequential)):
try:
super(MinDiffModel, self).__init__(
inputs=None, outputs=None, skip_init=True, **kwargs)
tf.keras.Model.__init__(self, **kwargs)
except Exception as e:
raise type(e)(
"There was a problem initializing the MinDiffModel subclass "
"instance. This was likely caused by:\n"
" - The kwargs that were passed in were not valid according to "
"tf.keras.Model or a base of your custom Model.\n"
" - Some args validation or requirement in your custom Model "
"__init__ method is too strict.\n"
" - Your Model subclass is not passing through **kwargs (in "
"particular `skip_init`) to the super().__init__ invocation.\n"
"To fix this, either fix the args, loosen the requirements, or "
"make sure to pass **kwargs to calls with super. If this is not "
"possible, you may need to integrate MinDiff without using "
"MinDiffModel.\n"
"Error raised: {}".format(e))
else:
try:
super(MinDiffModel, self).__init__(**kwargs)
except Exception as e:
raise type(e)(
"There was a problem initializing the MinDiffModel instance. "
"This was likely caused by the kwargs that were passed in not "
"being valid according to tf.keras.Model.\n"
"Error raised: {}".format(e))
# Set _auto_track_sub_layers to true to ensure we track the
# original_model and MinDiff layers.
self._auto_track_sub_layers = True # Track sub layers.
self.built = True # This Model is built, original_model may or may not be.
# Masking, if any, is taken care of by original_model.
self._supports_masking = False
# Clear input_spec in case there is one. We cannot make any strong
# assertions because `min_diff_data` may or may not be included and can
# have different shapes since weight is optional.
self.input_spec = None
self._original_model = original_model
structure_utils.validate_min_diff_structure(loss, struct_name="loss")
self._loss = tf.nest.map_structure(loss_utils._get_loss, loss)
structure_utils.validate_min_diff_structure(
loss_weight, struct_name="loss_weight")
self._loss_weight = _conform_weights_to_losses(
self._loss, loss_weight, default_value=1.0)
self._min_diff_loss_metric = _create_unique_metrics(self._loss,
self.metrics)
if (predictions_transform is not None and
not callable(predictions_transform)):
raise ValueError("`predictions_transform` must be callable if passed "
"in, given: {}".format(predictions_transform))
self._predictions_transform = predictions_transform
@property
def predictions_transform(self):
"""Function to be applied on MinDiff predictions before calculating loss.
MinDiff predictions are the output of `original_model` on the MinDiff
examples (see `compute_min_diff_loss` for details). These might not
initially be a `tf.Tensor`, for example if the model is multi-output. If
this is the case, the predictions need to be converted into a `tf.Tensor`.
This can be done by selecting one of the outputs or by combining them in
some way.
```
# Pick out a specific output to use for MinDiff.
transform = lambda predictions: predictions["output2"]
model = MinDiffModel(..., predictions_transform=transform)
# test data imitating multi_output predictions
test_predictions = {
"output1": [1, 2, 3],
"output2": [4, 5, 6],
}
model.predictions_transform(test_predictions) # [4, 5, 6]
```
If no `predictions_transform` parameter is passed in (or `None` is used),
then it will default to the identity.
```
model = MinDiffModel(..., predictions_transform=None)
model.predictions_transform([1, 2, 3]) # [1, 2, 3]
```
The result of applying `predictions_transform` on the MinDiff predictions
must be a `tf.Tensor`. The `min_diff_loss` will be calculated on these
results.
"""
if self._predictions_transform is None:
return lambda predictions: predictions
return self._predictions_transform
@property
def original_model(self):
"""`tf.keras.Model` to be trained with the additional `min_diff_loss` term.
Inference and evaluation will also come from the results this model
provides.
"""
return self._original_model
def _call_original_model(self, inputs, training=None, mask=None):
"""Calls the original model with appropriate args."""
arg_tuples = [("training", training,
self.original_model._expects_training_arg)]
# Check if the original model call signature uses "mask" and pass mask to
# the original model if present.
if "mask" in inspect.getfullargspec((self.original_model.call)).args:
arg_tuples.append(("mask", mask, self.original_model._expects_mask_arg))
kwargs = {name: value for name, value, expected in arg_tuples if expected}
return self.original_model(inputs, **kwargs)
def unpack_original_inputs(self, inputs):
# pyformat: disable
"""Extracts original_inputs from `inputs`.
Arguments:
inputs: `inputs` as described in `MinDiffModel.call`.
Identifies whether `min_diff_data` is included in `inputs`. If it is, then
what is returned is the component that is only meant to be used in the call
to `original_model`.
```
model = ... # MinDiffModel.
inputs = ... # Batch containing `min_diff_data`
# Extracts component that is only meant to be passed to `original_model`.
original_inputs = model.unpack_original_inputs(inputs)
```
If `min_diff_data` is not included, then `inputs` is returned directly.
```
model = ... # MinDiffModel.
# Test batch without `min_diff_data` (i.e. just passing in a simple array)
print(model.unpack_original_inputs([1, 2, 3])) # [1, 2, 3]
```
The default implementation is a pure wrapper around
`min_diff.keras.utils.unpack_original_inputs`. See there for implementation
details.
Returns:
Inputs to be used in the call to `original_model`.
"""
# pyformat: enable
return utils.unpack_original_inputs(inputs)
def unpack_min_diff_data(self, inputs):
# pyformat: disable
"""Extracts `min_diff_data` from `inputs` if present or returns `None`.
Arguments:
inputs: `inputs` as described in `MinDiffModel.call`.
Identifies whether `min_diff_data` is included in `inputs` and returns
`min_diff_data` if it is.
```
model = ... # MinDiffModel.
inputs = ... # Batch containing `min_diff_data`
min_diff_data = model.unpack_min_diff_data(inputs)
```
If `min_diff_data` is not included, then `None` is returned.
```
model = ... # MinDiffModel.
# Test batch without `min_diff_data` (i.e. just passing in a simple array)
print(model.unpack_min_diff_data([1, 2, 3])) # None
```
The default implementation is a pure wrapper around
`min_diff.keras.utils.unpack_min_diff_data`. See there for implementation
details.
Returns:
`min_diff_data` to be passed to `MinDiffModel.compute_min_diff_loss` if
present or `None` otherwise.
"""
# pyformat: enable
return utils.unpack_min_diff_data(inputs)
def compute_min_diff_loss(self, min_diff_data, training=None, mask=None):
# pyformat: disable
"""Computes `min_diff_loss`(es) corresponding to `min_diff_data`.
Arguments:
min_diff_data: Tuple of data or valid MinDiff structure of tuples as
described below.
training: Boolean indicating whether to run in training or inference mode.
See `tf.keras.Model.call` for details.
mask: Mask or list of masks as described in `tf.keras.Model.call`. These
will be applied when calling the `original_model`.
`min_diff_data` must have a structure (or be a single element) matching that
of the `loss` parameter passed in during initialization. Each element of
`min_diff_data` (and `loss`) corresponds to one application of MinDiff.
Like the input requirements described in `tf.keras.Model.fit`, each element
of `min_diff_data` must be a tuple of length 2 or 3. The tuple will be
unpacked using the standard `tf.keras.utils.unpack_x_y_sample_weight`
function:
```
min_diff_data_elem = ... # Single element from a batch of min_diff_data.
min_diff_x, min_diff_membership, min_diff_sample_weight = (
tf.keras.utils.unpack_x_y_sample_weight(min_diff_data_elem))
```
The components are defined as follows:
- `min_diff_x`: inputs to `original_model` to get the corresponding MinDiff
predictions.
- `min_diff_membership`: numerical [batch_size, 1] `Tensor` indicating which
group each example comes from (marked as `0.0` or `1.0`).
- `min_diff_sample_weight`: Optional weight `Tensor`. The weights will be
applied to the examples during the `min_diff_loss` calculation.
For each application of MinDiff, the `min_diff_loss` is ultimately
calculated from the MinDiff predictions which are evaluated in the
following way:
```
... # In compute_min_diff_loss call.
min_diff_x = ... # Single batch of MinDiff examples.
# Get predictions for MinDiff examples.
min_diff_predictions = self.original_model(min_diff_x, training=training)
# Transform the predictions if needed. By default this is the identity.
min_diff_predictions = self.predictions_transform(min_diff_predictions)
```
Returns:
Scalar (if only one) or list of `min_diff_loss` values calculated from
`min_diff_data`.
Raises:
ValueError: If the structure of `min_diff_data` does not match that of the
`loss` that was passed to the model during initialization.
ValueError: If the transformed `min_diff_predictions` is not a
`tf.Tensor`.
"""
# pyformat: enable
structure_utils._assert_same_min_diff_structure(min_diff_data, self._loss)
# Flatten everything and calculate min_diff_loss for each application.
flat_data = structure_utils._flatten_min_diff_structure(min_diff_data)
flat_losses = structure_utils._flatten_min_diff_structure(self._loss)
flat_weights = structure_utils._flatten_min_diff_structure(
self._loss_weight)
flat_metrics = structure_utils._flatten_min_diff_structure(
self._min_diff_loss_metric)
min_diff_losses = [
self._compute_single_min_diff_loss(data, loss, weight, metric, training,
mask) for data, loss, weight, metric
in zip(flat_data, flat_losses, flat_weights, flat_metrics)
]
# If there is only one application return a scalar rather than a list.
if len(min_diff_losses) == 1:
min_diff_losses = min_diff_losses[0]
return min_diff_losses
def _compute_single_min_diff_loss(self,
min_diff_data,
loss,
loss_weight,
min_diff_loss_metric,
training=None,
mask=None):
"""Computes a single `min_diff_loss` given a loss, weight, and data.
This will be called for each application of MinDiff. See
`MinDiffModel.compute_min_diff_loss` for details.
"""
x, membership, sample_weight = (
tf.keras.utils.unpack_x_y_sample_weight(min_diff_data))
predictions = self._call_original_model(x, training=training, mask=mask)
# Clear any losses added when calling the original model on the MinDiff
# examples. The right losses, if any, will be added when the original_model
# is called on the original inputs.
self._clear_losses()
predictions = self.predictions_transform(predictions)
if not isinstance(predictions, tf.Tensor):
err_msg = (
"MinDiff `predictions` meant for calculating the `min_diff_loss` "
"must be a Tensor, given: {}\n".format(predictions))
if self._predictions_transform is None:
err_msg += (
"This is due to the fact that `original_model` does not return "
"a Tensor either because it is multi output or because it has some "
"custom implementation. To handle this, pass in a "
"`predictions_transform` that converts the result into the tensor "
"the `min_diff_loss` should be calculated on.")
else:
err_msg += ("This is due to the fact that the provided "
"`predictions_transform` parameter does not return a "
"Tensor when given the output of `original_model`.")
err_msg += "\nSee `MinDiffModel` for additional documentation."
raise ValueError(err_msg)
min_diff_loss = loss_weight * loss(
predictions=predictions,
membership=membership,
sample_weight=sample_weight)
min_diff_loss_metric.update_state(min_diff_loss)
return min_diff_loss
@docs.do_not_doc_in_subclasses
def call(self, inputs, training=None, mask=None):
# pyformat: disable
"""Calls `original_model` with optional `min_diff_loss` as regularization loss.
Args:
inputs: Inputs to original_model, optionally containing `min_diff_data` as
described below.
training: Boolean indicating whether to run in training or inference mode.
See `tf.keras.Model.call` for details.
mask: Mask or list of masks as described in `tf.keras.Model.call`.
Note: Like `tf.keras.Model.call`, this method should not be called directly.
To call a model on an input, always use the `__call__` method,
i.e. `model(inputs)`, which relies on the `call` method internally.
This method should be used the same way as `tf.keras.Model.call`. Depending
on whether you are in train mode, `inputs` may need to include
`min_diff_data` (see `MinDiffModel.compute_min_diff_data` for details on
what form that needs to take).
- If `training=True`: `inputs` must contain `min_diff_data` (see details
below).
- If `training=False`: including `min_diff_data` is optional.
If present, the `min_diff_loss` is added by calling `self.add_loss` and will
show up in `self.losses`.
```
model = ... # MinDiffModel.
dataset = ... # Dataset containing min_diff_data.
for batch in dataset.take(1):
model(batch, training=True)
model.losses[0] # First element(s) will be the min_diff_loss(es).
```
Including `min_diff_data` in `inputs` implies that
`MinDiffModel.unpack_original_inputs` and
`MinDiffModel.unpack_min_diff_data` behave as expected when called on
`inputs` (see methods for details).
This condition is satisfied with the default implementations if you use
`min_diff.keras.utils.pack_min_diff_data` to create the dataset that
includes `min_diff_data`.
Returns:
A `tf.Tensor` or nested structure of `tf.Tensor`s according to the
behavior `original_model`. See `tf.keras.Model.call` for details.
Raises:
ValueError: If `training` is set to `True` but `inputs` does not include
`min_diff_data`.
"""
# pyformat: enable
original_inputs = self.unpack_original_inputs(inputs)
min_diff_data = self.unpack_min_diff_data(inputs)
# If training is True, we require min_diff_data to be available.
if training and min_diff_data is None:
raise ValueError(
"call `inputs` must contain MinDiffData during training.")
# Add min_diff_loss if min_diff_data is available.
if min_diff_data is not None:
min_diff_loss = self.compute_min_diff_loss(
min_diff_data, training=training)
# Add min_diff_loss(es) as regularization loss(es).
tf.nest.map_structure(self.add_loss, min_diff_loss)
return self._call_original_model(
original_inputs, training=training, mask=mask)
@docs.do_not_generate_docs
def test_step(self, data, *args, **kwargs):
"""The logic for one evaluation step.
Has the exact same behavior as `tf.keras.Model.test_step` with the one
exception that it removes the 'min_diff_loss' metric(s) if `min_diff_data`
is not available.
"""
metrics = super(MinDiffModel, self).test_step(data, *args, **kwargs)
# If there is no min_diff_data, remove the min_diff_loss metric.
x, _, _ = tf.keras.utils.unpack_x_y_sample_weight(data)
if self.unpack_min_diff_data(x) is None:
for metric in tf.nest.flatten(self._min_diff_loss_metric):
if metric.name in metrics:
del metrics[metric.name]
return metrics
# We are overriding this solely to provide complete documentation on the
# limitations of saving this way as opposed to behavior of normal models.
def save(self, *args, **kwargs):
"""Exports the model as described in `tf.keras.Model.save`.
For subclasses of `MinDiffModel` that have not been registered as Keras
objects, this method will likely be what you want to call to continue
training your model with MinDiff after having loaded it. If you want to use
the loaded model purely for inference, you will likely want to use
`MinDiffModel.save_original_model` instead.
Note: A model loaded from the output of
`UnregisteredMinDiffModelSubclass.save` is slightly different from the
original instance in that it will require `min_diff_data` to be included
in inputs to all functions, even `MinDiffModel.evaluate` and
`MinDiffModel.predict`.
The exception noted above for unregistered `MinDiffModel` subclasses is the
only difference with `tf.keras.Model.save`. To avoid these subtle
differences, we strongly recommend registering `MinDiffModel` subclasses as
Keras objects. See the documentation of
`tf.keras.utils.register_keras_serializable` for details.
"""
return super(MinDiffModel, self).save(*args, **kwargs)
def save_original_model(self, *args, **kwargs):
"""Exports the `original_model`.
Exports the `original_model`. When loaded, this model will be the type of
`original_model` and will no longer be able to train or evaluate with
MinDiff data.
Note: Since a model loaded from the output of
`MinDiffModel.save_original_model` will be an instance of the same type as
`original_model`, you will need to rewrap it with `MinDiffModel` if you want
to train it more with MinDiff.
"""
return self.original_model.save(*args, **kwargs)
def compile(self, *args, **kwargs):
"""Compile both `self` and `original_model` using the same parameters.
See `tf.keras.Model.compile` for details.
"""
self.original_model.compile(*args, **kwargs)
return super(MinDiffModel, self).compile(*args, **kwargs)
@docs.do_not_doc_in_subclasses
def get_config(self):
"""Creates a config dictionary for the `MinDiffModel` instance.
Note: This will ignore anything resulting from the kwargs passed in at
initialization time or changes made to new attributes added afterwards. If
this is problematic you will need to subclass MinDiffModel and override this
method to account for these.
Any subclass with additional attributes will need to override this method.
When doing so, users will mostly likely want to first call `super`.
Returns:
A config dictionary for the `MinDiffModel` isinstance.
Raises:
Exception: If calling `original_model.get_config()` raises an error. The
type raised will be the same as that of the original error.
"""
# Check that original_model.get_config is implemented and raise a helpful
# error message if not.
try:
_ = self._original_model.get_config()
except Exception as e:
raise type(e)(
"MinDiffModel cannot create a config because `original_model` has "
"not implemented get_config() or has an error in its implementation."
"\nError raised: {}".format(e))
# Try super.get_config if implemented. In most cases it will not be.
try:
config = super(MinDiffModel, self).get_config()
except NotImplementedError:
config = {}
config.update({
"original_model": self._original_model,
"loss": self._loss,
"loss_weight": self._loss_weight,
"name": self.name,
})
if self._predictions_transform is not None:
config["predictions_transform"] = dill.dumps(self._predictions_transform)
return {k: v for k, v in config.items() if v is not None}
@classmethod
def _deserialize_config(cls, config):
"""Takes a config of attributes and deserializes as needed.
Transforms are deserialized using the `dill` module. The `original_model`
and `loss` are deserialized using the
`tf.keras.utils.deserialize_keras_object` function.
Note: This is a convenience method that assumes that the only elements that
need additional deserialization are `predictions_transform`, original_model`
and `loss`. If this is not the case for a given subclass this method (or
`from_config`) will need to be implemented directly.
"""
def _deserialize_value(key, value):
if key == "predictions_transform":
return dill.loads(value)
return value # No transformation applied.
return {k: _deserialize_value(k, v) for k, v in config.items()}
@classmethod
@docs.do_not_doc_in_subclasses
def from_config(cls, config):
"""Creates a `MinDiffModel` instance from the config.
Any subclass with additional attributes or a different initialization
signature will need to override this method or `get_config`.
Returns:
A new `MinDiffModel` instance corresponding to `config`.
"""
config = cls._deserialize_config(config)
return cls(**config)
def _unique_metric_name(name, existing_metrics):
"""Returns a unique name given the existing metric names."""
existing_names = set([metric.name for metric in existing_metrics])
proposed_name = name
cnt = 1 # Start incrementing with 1.
# Increment name suffix until the name is unique.
while proposed_name in existing_names:
proposed_name = name + "_" + str(cnt)
cnt += 1
return proposed_name
def _create_unique_metrics(loss, existing_metrics):
"""Create uniquely named MinDiff metric(s) corresponding to loss parameter."""
if not isinstance(loss, dict):
return tf.keras.metrics.Mean(
_unique_metric_name("min_diff_loss", existing_metrics))
min_diff_metrics = []
for name in loss.keys():
min_diff_metrics.append(
tf.keras.metrics.Mean(
_unique_metric_name(name + "_min_diff_loss", existing_metrics)))
return tf.nest.pack_sequence_as(loss, min_diff_metrics)
def _conform_weights_to_losses(loss, loss_weight, default_value):
"""Conforms weights to match structure of losses.
Shape weights to match the structure of `loss` if possible. If `loss_weight`
is a single value, it will be broadcast for all losses. If `loss_weight` is
`None` or has missing entries, `default_value` will be used.
Args:
loss: loss (possible nested) that weights will be conformed to.
loss_weight: weight that will be conformed to loss structure. If only a
single value, it will be broadcast for all losses. If `None`, it will be
replaced by `default_value`.
default_value: Value used if `loss_weight` is `None` or if some weights are
missing for certain losses.
Returns:
Weight corresponding to `loss` structure.
"""
# Validate loss (loss_weights will be implicitly validated)
structure_utils.validate_min_diff_structure(loss, struct_name="loss")
# If loss_weight is unnested, then broadcast to all values of loss.
if not tf.nest.is_nested(loss_weight):
if loss_weight is None:
loss_weight = default_value
return tf.nest.map_structure(lambda _: loss_weight, loss)
# If execution reaches here, then loss_weight is nested (a dict).
# If loss is not nested, then raise an error (since loss_weight is a nested).
if not tf.nest.is_nested(loss):
try:
tf.nest.assert_same_structure(loss, loss_weight)
except Exception as e:
raise ValueError("`loss` and `loss_weight` do not have matching "
"structures: \n{}".format(e))
# At this point, we should be guaranteed that the two structures are dicts if
# they are valid MinDiff structures. However, in case they are not, we assert
# that they are both dicts (this also helps be future proof since it will
# catch the broken assumption immediately if the validity definition changes).
# Note: As is, it should be impossible to get to this point. The only way it
# would is if this function is called without validating or if the
# definition of a valid MinDiff structure has changed.
if not (isinstance(loss, dict) and isinstance(loss_weight, dict)):
raise ValueError(
"One of `loss` and `loss_weight` is neither a single element nor a "
"dict. This should never happen if they are valid MinDiff structures. "
"If you think this is a valid use case (e.g. if the definition has "
"changed but this piece of code is out of sync), please file an issue "
"so we can look at it and make the appropriate fix.")
# Save copy to not alter the original dict.
loss_weight = loss_weight.copy()
# First, we make sure to set defaults for any losses that do not have
# corresponding weights. Raise an error if there are weights with keys that
# don't correspond to losses.
if not set(loss_weight.keys()) <= set(loss.keys()):
raise ValueError(
"`loss_weight` contains keys that do not correspond to losses:"
"\n\nloss: {}\n\nloss_weight: {}".format(loss, loss_weight))
# Provide defaults for any missing weights.
for key in loss.keys():
if key not in loss_weight:
loss_weight[key] = default_value
# At this point, we should be guaranteed that the two structures match if they
# are valid MinDiff structures. However, in case they are not we assert that
# they match.
try:
tf.nest.assert_same_structure(loss, loss_weight)
except Exception as e:
raise ValueError(
"`loss` and `loss_weight` (potentially with default weights added) "
"do not have matching structures: \n{}".format(e))
return loss_weight
|
{
"content_hash": "c6993d1828a2fc3d0443ddcb3e147e81",
"timestamp": "",
"source": "github",
"line_count": 850,
"max_line_length": 84,
"avg_line_length": 39.35764705882353,
"alnum_prop": 0.679589884617684,
"repo_name": "tensorflow/model-remediation",
"id": "92a0a78d488a0273a10ba357e1986ac313742625",
"size": "34045",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow_model_remediation/min_diff/keras/models/min_diff_model.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "508063"
}
],
"symlink_target": ""
}
|
from declined_transaction_exception import DeclinedTransactionException
class DeclinedRefundException(DeclinedTransactionException):
"""
Represents an error response from a refund call.
"""
def __init__(self, status_code, response_body, errors):
if errors is not None:
super(DeclinedRefundException, self).__init__(status_code,
response_body,
errors.error_id,
errors.errors,
DeclinedRefundException.__create_message(
errors))
else:
super(DeclinedRefundException, self).__init__(status_code,
response_body,
None, None,
DeclinedRefundException.__create_message(
errors))
self.__errors = errors
@staticmethod
def __create_message(errors):
if errors is not None:
refund = errors.refund_result
else:
refund = None
if refund is not None:
return "declined refund '" + refund.id + "' with status '" + refund.status + "'"
else:
return "the Ingenico ePayments platform returned a declined refund response"
@property
def refund_result(self):
"""
:return: The result of creating a refund if available, otherwise None.
"""
if self.__errors is None:
return None
else:
return self.__errors.refund_result
|
{
"content_hash": "60ddc5598b388740af36ab68f9a67908",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 99,
"avg_line_length": 42,
"alnum_prop": 0.454004329004329,
"repo_name": "Ingenico-ePayments/connect-sdk-python2",
"id": "b7fda5ad33534baa4a9aa928e6caa82d445ed843",
"size": "1848",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ingenico/connect/sdk/declined_refund_exception.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1733005"
}
],
"symlink_target": ""
}
|
import re
import sys
import unicodedata
import StringIO
# An unsafe character is one outside the range that everybody can handle; we
# escape them in groups so that when we escape them legitimate surrogate pairs
# get represented as \Uxxxxyyyy escapes.
_UNSAFE_CHARACTERS = re.compile(u"[^\u0001-\ud7ff\ue000-\ufdcf\ufdf0-\ufffd]+")
_NON_ASCII_BYTE = re.compile("[\x80-\xff]")
_VALID_NAME_ASCII_CHAR = re.compile("[A-Za-z0-9._ -]")
_ASCII_CHAR = re.compile("[\x00-\x7f]")
# Letter, Mark, Number, Punctuation, Symbol
_VALID_NAME_CATEGORIES = re.compile("^[LMNPS]")
class ConversionError(Exception):
pass
def _escape(g):
return g.group(0).encode("unicode_escape").decode()
def _escape_byte(g):
return "\\x%02x" % ord(g.group(0))
def _decode_escaped(s, encoding):
# Note that cStringIO wouldn't work here, because it doesn't handle Unicode
out = StringIO.StringIO()
pos = 0
while pos < len(s):
try:
out.write(s[pos:].decode("utf8"))
pos = len(s)
except UnicodeDecodeError, e:
out.write(s[pos:pos + e.start].decode("utf8"))
out.write(_NON_ASCII_BYTE.sub(_escape_byte, s[pos + e.start:pos + e.end]))
pos += e.end
return out.getvalue()
def decode(s, encoding="utf8", escape=False):
"""
@param s the str object to decode into Unicode
@param encoding the encoding to use (defaults to UTF-8)
@param escape if True,
"""
try:
u = s.decode(encoding)
except UnicodeDecodeError, e:
if escape:
u = _decode_escaped(s, encoding)
else:
raise ConversionError(e.reason)
if escape:
return escape_unsafe(u)
else:
m = _UNSAFE_CHARACTERS.search(u)
if m:
# Do a bunch of work here to get an explaination about what is wrong
c = ord(u[m.start(0)])
if c == 0:
raise ConversionError('text contains NUL byte')
elif c >= 0xd800 and c < 0xe000:
# Detect non-BMP characters in UCS-2 Python
if sys.maxunicode == 0xffff and c < 0xdc00:
if m.start(0) + 1 < m.end(0):
c2 = ord(u[m.start(0) + 1])
if c2 >= 0xdc00 and c2 < 0xe000:
raise ConversionError('text contains characters not in basic multilingual plane')
raise ConversionError('text contains unpaired surrogates')
elif c > 0xffff:
raise ConversionError('text contains characters not in basic multilingual plane')
else:
# Byte reversed BOM, etc.
raise ConversionError('text contains invalid Unicode codepoints')
return u
def escape_unsafe(u):
"""Encode any characters in a string that might cause problems for Reinteract
as \\u<nnnn> or \\U<nnnnnnnn> escape sequences. This includes embedded NULs, characters
not in the BMP and codepoints that are defined by the Unicode spec as not
valid characters."""
return _UNSAFE_CHARACTERS.sub(_escape, u)
def validate_name(name):
# Remove surrounding whitespace
name = name.strip()
if name == "":
raise ValueError("Name cannot be empty")
# Replace series of whitespace with a single space
name = re.compile(r"\s+", re.UNICODE).sub(" ", name)
bad_chars = set()
for c in name:
if not _VALID_NAME_ASCII_CHAR.match(c):
if _ASCII_CHAR.match(c) or _UNSAFE_CHARACTERS.match(c):
bad_chars.add(c)
else:
category = unicodedata.category(c)
if not _VALID_NAME_CATEGORIES.match(category):
bad_chars.add(c)
bad = ", ".join(("'" + c + "'" for c in bad_chars))
if len(bad_chars) == 1:
raise ValueError("Name contains invalid character: %s" % bad)
elif len(bad_chars) > 0:
raise ValueError("Name contains invalid characters: %s" % bad)
elif name.startswith("."):
raise ValueError("Name cannot start with a '.'")
return name
def canonicalize_filename(filename):
if not isinstance(filename, unicode):
filename = filename.decode("UTF-8")
# MacOS X uses decomposed UTF-8 to store filenames
filename = unicodedata.normalize('NFC', filename)
return filename
######################################################################
|
{
"content_hash": "5c4ce613b210f0f7c95d8668fb8a6b4f",
"timestamp": "",
"source": "github",
"line_count": 131,
"max_line_length": 109,
"avg_line_length": 33.74045801526717,
"alnum_prop": 0.5938914027149321,
"repo_name": "alexey4petrov/reinteract",
"id": "4dff6987ddcf3c62ee0fb787617568d5f46c79dc",
"size": "5929",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/reinteract/reunicode.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "11366"
},
{
"name": "Objective-C",
"bytes": "24614"
},
{
"name": "Python",
"bytes": "621540"
},
{
"name": "Shell",
"bytes": "160"
}
],
"symlink_target": ""
}
|
from twisted.plugin import IPlugin
from txircd.module_interface import IMode, IModuleData, Mode, ModuleData
from txircd.utils import ModeType
from zope.interface import implementer
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
@implementer(IPlugin, IModuleData, IMode)
class SecretMode(ModuleData, Mode):
name = "SecretMode"
core = True
affectedActions = { "displaychannel": 20,
"showchannel-whois": 20 }
def channelModes(self) -> List[Union[Tuple[str, ModeType, Mode], Tuple[str, ModeType, Mode, int, str]]]:
return [ ("s", ModeType.NoParam, self) ]
def actions(self) -> List[Tuple[str, int, Callable]]:
return [ ("modeactioncheck-channel-s-displaychannel", 1, self.chanIsSecretList),
("modeactioncheck-channel-s-showchannel-whois", 1, self.chanIsSecretWhois) ]
def chanIsSecretList(self, channel: "IRCChannel", displayData: Dict[str, Any], sameChannel: "IRCChannel", user: "IRCUser", usedSearchMask: bool) -> Union[str, bool, None]:
if "s" in channel.modes:
return True
return None
def chanIsSecretWhois(self, channel: "IRCChannel", sameChannel: "IRCChannel", queryUser: "IRCUser", targetUser: "IRCUser") -> Union[str, bool, None]:
if "s" in channel.modes:
return True
return None
def apply(self, actionName: str, channel: "IRCChannel", param: str, *params: Any) -> Union[None, Optional[bool]]: # Union of return types of each affected action
if actionName == "displaychannel":
displayData, sameChannel, user, usedSearchMask = params
if user not in channel.users:
displayData.clear() # Let's make it not show the channel at all
return
if actionName == "showchannel-whois":
sameChannel, queryUser, targetUser = params
if queryUser not in channel.users:
return False
return None
secretMode = SecretMode()
|
{
"content_hash": "980f9f7feb461487c0ace27ac27d63c4",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 172,
"avg_line_length": 42.48837209302326,
"alnum_prop": 0.7197591680350302,
"repo_name": "Heufneutje/txircd",
"id": "79fa44c4b65be49d84a7e34269406b86fa459d5a",
"size": "1827",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev/next",
"path": "txircd/modules/rfc/cmode_s.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "547"
},
{
"name": "Python",
"bytes": "792279"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
import threading
import sys
def merge_dicts(*dicts):
out = {}
for d in dicts:
if not d:
continue
for k, v in iteritems(d):
out[k] = v
return out
class memoize(object):
"""
Memoize the result of a property call.
>>> class A(object):
>>> @memoize
>>> def func(self):
>>> return 'foo'
"""
def __init__(self, func):
self.__name__ = func.__name__
self.__module__ = func.__module__
self.__doc__ = func.__doc__
self.func = func
def __get__(self, obj, type=None):
if obj is None:
return self
d, n = vars(obj), self.__name__
if n not in d:
d[n] = self.func(obj)
return d[n]
def once(func):
"""
Runs a thing once and once only.
"""
lock = threading.Lock()
def new_func(*args, **kwargs):
if new_func.called:
return
with lock:
if new_func.called:
return
rv = func(*args, **kwargs)
new_func.called = True
return rv
new_func = update_wrapper(new_func, func)
new_func.called = False
return new_func
from .apitracer import request_tracer as zipkin_tracer
|
{
"content_hash": "d67598b66cb4e21a9a24ec3231ae8d45",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 54,
"avg_line_length": 20.453125,
"alnum_prop": 0.5019098548510313,
"repo_name": "harkishan81001/py-instrumenting",
"id": "8b43c3b82b48ba99395920c63418f1342ac857a3",
"size": "1309",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytracing/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "17641"
}
],
"symlink_target": ""
}
|
"""
Django settings for project_template project.
Generated by 'django-admin startproject' using Django 1.9.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<SOME-SECRET-KEY>'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
]
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project_template.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project_template.wsgi.application'
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.'
'password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.'
'password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.'
'password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.'
'password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'
|
{
"content_hash": "462ad6a5473fdf5cb67799bea81c401b",
"timestamp": "",
"source": "github",
"line_count": 109,
"max_line_length": 78,
"avg_line_length": 26.68807339449541,
"alnum_prop": 0.6789274664833276,
"repo_name": "CorrosiveKid/django_project_template",
"id": "827495b76861ccfd9e88f18a4f7b6ef72cf1739f",
"size": "2909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_template/settings/common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5796"
}
],
"symlink_target": ""
}
|
import os
import multiprocessing
from setuptools import setup, find_packages
with open(
os.path.join(
os.path.dirname(__file__),
'requirements.txt'
)
) as f:
required = f.read().splitlines()
setup(
name='twoline-utils',
version='0.1',
url='http://github.com/latestrevision/twoline-utils/',
description='Utils for http://github.com/latestrevision/twoline/',
author='Adam Coddington',
author_email='me@adamcoddington.net',
classifiers=[
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Utilities',
],
install_requires=required,
packages=find_packages(),
entry_points={'console_scripts': [
'tlu = twoline_utils.cmdline:run_from_cmdline']},
test_suite='nose.collector',
tests_require=[
'nose',
]
)
|
{
"content_hash": "3807eb8fd5755bfdae7b5561e52a32a0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 70,
"avg_line_length": 25.82857142857143,
"alnum_prop": 0.625,
"repo_name": "coddingtonbear/twoline-utils",
"id": "0162584a601a3a20dfe056ec08970378cf5bc7dd",
"size": "904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10428"
}
],
"symlink_target": ""
}
|
import unittest
import os
import time
from lib.util.mysqlBaseTestCase import mysqlBaseTestCase
server_requirements = [[],[],[]]
server_requests = {'join_cluster':[(0,1), (0,2)]}
servers = []
server_manager = None
test_executor = None
class basicTest(mysqlBaseTestCase):
def test_basic1(self):
self.servers = servers
master_server = servers[0]
other_nodes = servers[1:] # this can be empty in theory: 1 node
time.sleep(5)
test_cmd = "./gendata.pl --spec=conf/percona/percona_no_blob.zz "
retcode, output = self.execute_randgen(test_cmd, test_executor, servers)
self.assertTrue(retcode==0, output)
# check 'master'
query = "SHOW TABLES IN test"
retcode, master_result_set = self.execute_query(query, master_server)
self.assertEqual(retcode,0, master_result_set)
expected_result_set = (('A',), ('AA',), ('B',), ('BB',), ('C',), ('CC',), ('D',), ('DD',))
self.assertEqual( master_result_set
, expected_result_set
, msg = (master_result_set, expected_result_set)
)
time.sleep(1)
master_slave_diff = self.check_slaves_by_checksum(master_server, other_nodes)
self.assertEqual(master_slave_diff, None, master_slave_diff)
|
{
"content_hash": "4d79a29a57c728766d63482c2d57ceb0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 98,
"avg_line_length": 38.05714285714286,
"alnum_prop": 0.5998498498498499,
"repo_name": "jonzobrist/Percona-Server-5.1",
"id": "6531f27d64b8347d49e9d93ac31a907e6f9bbc95",
"size": "2194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "kewpie/percona_tests/cluster_basic/basic_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "63359"
},
{
"name": "C",
"bytes": "21629542"
},
{
"name": "C#",
"bytes": "186518"
},
{
"name": "C++",
"bytes": "26608206"
},
{
"name": "JavaScript",
"bytes": "34135"
},
{
"name": "Objective-C",
"bytes": "73098"
},
{
"name": "Perl",
"bytes": "2396066"
},
{
"name": "Puppet",
"bytes": "447114"
},
{
"name": "Python",
"bytes": "1181762"
},
{
"name": "R",
"bytes": "34454"
},
{
"name": "Racket",
"bytes": "2416"
},
{
"name": "Scilab",
"bytes": "7740"
},
{
"name": "Shell",
"bytes": "1287942"
}
],
"symlink_target": ""
}
|
def read_part_of_speech_file(filename):
'''Read a part-of-speech file and return a list of (pos, word) pairs.'''
with open(filename) as pos_file:
return [line.split() for line in pos_file]
def get_predictions(test_filename, predict_sentence):
'''Given an HMM, compute predictions for each word in the test data.'''
sentence = []
true_poses = []
for true_pos, word in (read_part_of_speech_file(test_filename)[1:]):
if word != '<s>':
sentence.append(word)
true_poses.append(true_pos)
else:
predictions = predict_sentence(sentence)
for word, pos, true_pos in zip(sentence, predictions, true_poses):
yield word, pos, true_pos
yield ('<s>', '<s>', '<s>')
sentence = []
true_poses = []
|
{
"content_hash": "8c23c44702404b7488d283331613e262",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 74,
"avg_line_length": 36,
"alnum_prop": 0.6309523809523809,
"repo_name": "Unknowncmbk/HiddenMarkovModel",
"id": "000cd1482645a3fe2247f6b7b722af3e36154e1e",
"size": "890",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "18958"
}
],
"symlink_target": ""
}
|
""" Tests for the Deep explainer.
"""
from urllib.error import HTTPError
from packaging import version
import numpy as np
import pandas as pd
import pytest
import shap
from shap import DeepExplainer
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
# pylint: disable=import-outside-toplevel, no-name-in-module, import-error
def test_tf_eager():
""" This is a basic eager example from keras.
"""
tf = pytest.importorskip('tensorflow')
if version.parse(tf.__version__) >= version.parse("2.4.0"):
pytest.skip("Deep explainer does not work for TF 2.4 in eager mode.")
x = pd.DataFrame({"B": np.random.random(size=(100,))})
y = x.B
y = y.map(lambda zz: chr(int(zz * 2 + 65))).str.get_dummies()
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Dense(10, input_shape=(x.shape[1],), activation="relu"))
model.add(tf.keras.layers.Dense(y.shape[1], input_shape=(10,), activation="softmax"))
model.summary()
model.compile(loss="categorical_crossentropy", optimizer="Adam")
model.fit(x.values, y.values, epochs=2)
e = DeepExplainer(model, x.values[:1])
sv = e.shap_values(x.values)
assert np.abs(e.expected_value[0] + sv[0].sum(-1) - model(x.values)[:, 0]).max() < 1e-4
def test_tf_keras_mnist_cnn(): # pylint: disable=too-many-locals
""" This is the basic mnist cnn example from keras.
"""
tf = pytest.importorskip('tensorflow')
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Flatten, Activation
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras import backend as K
from tensorflow.compat.v1 import ConfigProto
from tensorflow.compat.v1 import InteractiveSession
config = ConfigProto()
config.gpu_options.allow_growth = True
InteractiveSession(config=config)
tf.compat.v1.disable_eager_execution()
batch_size = 64
num_classes = 10
epochs = 1
# input image dimensions
img_rows, img_cols = 28, 28
# the data, split between train and test sets
# (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
x_train = np.random.randn(200, 28, 28)
y_train = np.random.randint(0, 9, 200)
x_test = np.random.randn(200, 28, 28)
y_test = np.random.randint(0, 9, 200)
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
# convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(2, kernel_size=(3, 3),
activation='relu',
input_shape=input_shape))
model.add(Conv2D(4, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(16, activation='relu')) # 128
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adadelta(),
metrics=['accuracy'])
model.fit(x_train[:10, :], y_train[:10, :],
batch_size=batch_size,
epochs=epochs,
verbose=1,
validation_data=(x_test[:10, :], y_test[:10, :]))
# explain by passing the tensorflow inputs and outputs
np.random.seed(0)
inds = np.random.choice(x_train.shape[0], 3, replace=False)
e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].input), x_train[inds, :, :])
shap_values = e.shap_values(x_test[:1])
sess = tf.compat.v1.keras.backend.get_session()
diff = sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_test[:1]}) - \
sess.run(model.layers[-1].input, feed_dict={model.layers[0].input: x_train[inds, :, :]}).mean(0)
sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
d = np.abs(sums - diff).sum()
assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % d
def test_tf_keras_linear():
"""Test verifying that a linear model with linear data gives the correct result.
"""
tf = pytest.importorskip('tensorflow')
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Input
from tensorflow.keras.optimizers import SGD
tf.compat.v1.disable_eager_execution()
np.random.seed(0)
# coefficients relating y with x1 and x2.
coef = np.array([1, 2]).T
# generate data following a linear relationship
x = np.random.normal(1, 10, size=(1000, len(coef)))
y = np.dot(x, coef) + 1 + np.random.normal(scale=0.1, size=1000)
# create a linear model
inputs = Input(shape=(2,))
preds = Dense(1, activation='linear')(inputs)
model = Model(inputs=inputs, outputs=preds)
model.compile(optimizer=SGD(), loss='mse', metrics=['mse'])
model.fit(x, y, epochs=30, shuffle=False, verbose=0)
fit_coef = model.layers[1].get_weights()[0].T[0]
# explain
e = shap.DeepExplainer((model.layers[0].input, model.layers[-1].output), x)
shap_values = e.shap_values(x)
# verify that the explanation follows the equation in LinearExplainer
values = shap_values[0] # since this is a "multi-output" model with one output
assert values.shape == (1000, 2)
expected = (x - x.mean(0)) * fit_coef
np.testing.assert_allclose(expected - values, 0, atol=1e-5)
def test_tf_keras_imdb_lstm():
""" Basic LSTM example using the keras API defined in tensorflow
"""
tf = pytest.importorskip('tensorflow')
# this fails right now for new TF versions (there is a warning in the code for this)
if version.parse(tf.__version__) >= version.parse("2.5.0"):
pytest.skip()
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Embedding
from tensorflow.keras.preprocessing import sequence
tf.compat.v1.disable_eager_execution()
# load the data from keras
np.random.seed(7)
max_features = 1000
try:
(X_train, _), (X_test, _) = imdb.load_data(num_words=max_features)
except Exception: # pylint: disable=broad-except
return # this hides a bug in the most recent version of keras that prevents data loading
X_train = sequence.pad_sequences(X_train, maxlen=100)
X_test = sequence.pad_sequences(X_test, maxlen=100)
# create the model. note that this is model is very small to make the test
# run quick and we don't care about accuracy here
mod = Sequential()
mod.add(Embedding(max_features, 8))
mod.add(LSTM(10, dropout=0.2, recurrent_dropout=0.2))
mod.add(Dense(1, activation='sigmoid'))
mod.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# select the background and test samples
inds = np.random.choice(X_train.shape[0], 3, replace=False)
background = X_train[inds]
testx = X_test[10:11]
# explain a prediction and make sure it sums to the difference between the average output
# over the background samples and the current output
sess = tf.compat.v1.keras.backend.get_session()
sess.run(tf.compat.v1.global_variables_initializer())
# For debugging, can view graph:
# writer = tf.compat.v1.summary.FileWriter("c:\\tmp", sess.graph)
# writer.close()
e = shap.DeepExplainer((mod.layers[0].input, mod.layers[-1].output), background)
shap_values = e.shap_values(testx)
sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
diff = sess.run(mod.layers[-1].output, feed_dict={mod.layers[0].input: testx})[0, :] - \
sess.run(mod.layers[-1].output, feed_dict={mod.layers[0].input: background}).mean(0)
assert np.allclose(sums, diff, atol=1e-02), "Sum of SHAP values does not match difference!"
def test_pytorch_mnist_cnn():
"""The same test as above, but for pytorch
"""
torch = pytest.importorskip('torch')
from torch import nn
from torch.nn import functional as F
class RandData:
""" Random test data.
"""
def __init__(self, batch_size):
self.current = 0
self.batch_size = batch_size
def __iter__(self):
return self
def __next__(self):
self.current += 1
if self.current < 10:
return torch.randn(self.batch_size, 1, 28, 28), torch.randint(0, 9, (self.batch_size,))
raise StopIteration
def run_test(train_loader, test_loader, interim):
class Net(nn.Module):
""" Basic conv net.
"""
def __init__(self):
super().__init__()
# Testing several different activations
self.conv_layers = nn.Sequential(
nn.Conv2d(1, 10, kernel_size=5),
nn.MaxPool2d(2),
nn.Tanh(),
nn.Conv2d(10, 20, kernel_size=5),
nn.ConvTranspose2d(20, 20, 1),
nn.AdaptiveAvgPool2d(output_size=(4, 4)),
nn.Softplus(),
)
self.fc_layers = nn.Sequential(
nn.Linear(320, 50),
nn.BatchNorm1d(50),
nn.ReLU(),
nn.Linear(50, 10),
nn.ELU(),
nn.Softmax(dim=1)
)
def forward(self, x):
""" Run the model.
"""
x = self.conv_layers(x)
x = x.view(-1, 320)
x = self.fc_layers(x)
return x
model = Net()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.5)
def train(model, device, train_loader, optimizer, _, cutoff=20):
model.train()
num_examples = 0
for _, (data, target) in enumerate(train_loader):
num_examples += target.shape[0]
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.mse_loss(output, torch.eye(10)[target])
# loss = F.nll_loss(output, target)
loss.backward()
optimizer.step()
# if batch_idx % 10 == 0:
# # print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
# # epoch, batch_idx * len(data), len(train_loader.dataset),
# # 100. * batch_idx / len(train_loader), loss.item()))
if num_examples > cutoff:
break
device = torch.device('cpu')
train(model, device, train_loader, optimizer, 1)
next_x, _ = next(iter(train_loader))
np.random.seed(0)
inds = np.random.choice(next_x.shape[0], 3, replace=False)
if interim:
e = shap.DeepExplainer((model, model.conv_layers[0]), next_x[inds, :, :, :])
else:
e = shap.DeepExplainer(model, next_x[inds, :, :, :])
test_x, _ = next(iter(test_loader))
input_tensor = test_x[:1]
input_tensor.requires_grad = True
shap_values = e.shap_values(input_tensor)
model.eval()
model.zero_grad()
with torch.no_grad():
diff = (model(test_x[:1]) - model(next_x[inds, :, :, :])).detach().numpy().mean(0)
sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
d = np.abs(sums - diff).sum()
assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % (d / np.abs(diff).sum())
batch_size = 32
try:
train_loader = RandData(batch_size)
test_loader = RandData(batch_size)
except HTTPError:
pytest.skip()
#print('Running test on interim layer')
run_test(train_loader, test_loader, interim=True)
#print('Running test on whole model')
run_test(train_loader, test_loader, interim=False)
def test_pytorch_custom_nested_models():
"""Testing single outputs
"""
torch = pytest.importorskip('torch')
from torch import nn
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from sklearn.datasets import load_boston
X, y = load_boston(return_X_y=True)
num_features = X.shape[1]
data = TensorDataset(torch.tensor(X).float(),
torch.tensor(y).float())
loader = DataLoader(data, batch_size=128)
class CustomNet1(nn.Module):
""" Model 1.
"""
def __init__(self):
super().__init__()
self.net = nn.Sequential(
nn.Sequential(
nn.Conv1d(1, 1, 1),
nn.ConvTranspose1d(1, 1, 1),
),
nn.AdaptiveAvgPool1d(output_size=6),
)
def forward(self, X):
""" Run the model.
"""
return self.net(X.unsqueeze(1)).squeeze(1)
class CustomNet2(nn.Module):
""" Model 2.
"""
def __init__(self, num_features):
super().__init__()
self.net = nn.Sequential(
nn.LeakyReLU(),
nn.Linear(num_features // 2, 2)
)
def forward(self, X):
""" Run the model.
"""
return self.net(X).unsqueeze(1)
class CustomNet(nn.Module):
""" Model 3.
"""
def __init__(self, num_features):
super().__init__()
self.net1 = CustomNet1()
self.net2 = CustomNet2(num_features)
self.maxpool2 = nn.MaxPool1d(kernel_size=2)
def forward(self, X):
""" Run the model.
"""
x = self.net1(X)
return self.maxpool2(self.net2(x)).squeeze(1)
model = CustomNet(num_features)
optimizer = torch.optim.Adam(model.parameters())
def train(model, device, train_loader, optimizer, epoch):
model.train()
num_examples = 0
for batch_idx, (data, target) in enumerate(train_loader):
num_examples += target.shape[0]
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.mse_loss(output.squeeze(1), target)
loss.backward()
optimizer.step()
if batch_idx % 2 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
device = torch.device('cpu')
train(model, device, loader, optimizer, 1)
next_x, _ = next(iter(loader))
np.random.seed(0)
inds = np.random.choice(next_x.shape[0], 20, replace=False)
e = shap.DeepExplainer(model, next_x[inds, :])
test_x, _ = next(iter(loader))
shap_values = e.shap_values(test_x[:1])
model.eval()
model.zero_grad()
with torch.no_grad():
diff = (model(test_x[:1]) - model(next_x[inds, :])).detach().numpy().mean(0)
sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
d = np.abs(sums - diff).sum()
assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % (d / np.abs(diff).sum())
def test_pytorch_single_output():
"""Testing single outputs
"""
torch = pytest.importorskip('torch')
from torch import nn
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from sklearn.datasets import load_boston
X, y = load_boston(return_X_y=True)
num_features = X.shape[1]
data = TensorDataset(torch.tensor(X).float(),
torch.tensor(y).float())
loader = DataLoader(data, batch_size=128)
class Net(nn.Module):
""" Test model.
"""
def __init__(self, num_features):
super().__init__()
self.linear = nn.Linear(num_features // 2, 2)
self.conv1d = nn.Conv1d(1, 1, 1)
self.convt1d = nn.ConvTranspose1d(1, 1, 1)
self.leaky_relu = nn.LeakyReLU()
self.aapool1d = nn.AdaptiveAvgPool1d(output_size=6)
self.maxpool2 = nn.MaxPool1d(kernel_size=2)
def forward(self, X):
""" Run the model.
"""
x = self.aapool1d(self.convt1d(self.conv1d(X.unsqueeze(1)))).squeeze(1)
return self.maxpool2(self.linear(self.leaky_relu(x)).unsqueeze(1)).squeeze(1)
model = Net(num_features)
optimizer = torch.optim.Adam(model.parameters())
def train(model, device, train_loader, optimizer, epoch):
model.train()
num_examples = 0
for batch_idx, (data, target) in enumerate(train_loader):
num_examples += target.shape[0]
data, target = data.to(device), target.to(device)
optimizer.zero_grad()
output = model(data)
loss = F.mse_loss(output.squeeze(1), target)
loss.backward()
optimizer.step()
if batch_idx % 2 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
device = torch.device('cpu')
train(model, device, loader, optimizer, 1)
next_x, _ = next(iter(loader))
np.random.seed(0)
inds = np.random.choice(next_x.shape[0], 20, replace=False)
e = shap.DeepExplainer(model, next_x[inds, :])
test_x, _ = next(iter(loader))
shap_values = e.shap_values(test_x[:1])
model.eval()
model.zero_grad()
with torch.no_grad():
diff = (model(test_x[:1]) - model(next_x[inds, :])).detach().numpy().mean(0)
sums = np.array([shap_values[i].sum() for i in range(len(shap_values))])
d = np.abs(sums - diff).sum()
assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % (d / np.abs(diff).sum())
def test_pytorch_multiple_inputs():
""" Check a multi-input scenario.
"""
torch = pytest.importorskip('torch')
def _run_pytorch_multiple_inputs_test(disconnected):
""" Testing multiple inputs
"""
from torch import nn
from torch.nn import functional as F
from torch.utils.data import TensorDataset, DataLoader
from sklearn.datasets import load_boston
torch.manual_seed(1)
X, y = load_boston(return_X_y=True)
num_features = X.shape[1]
x1 = X[:, num_features // 2:]
x2 = X[:, :num_features // 2]
data = TensorDataset(torch.tensor(x1).float(),
torch.tensor(x2).float(),
torch.tensor(y).float())
loader = DataLoader(data, batch_size=128)
class Net(nn.Module):
""" Testing model.
"""
def __init__(self, num_features, disconnected):
super().__init__()
self.disconnected = disconnected
if disconnected:
num_features = num_features // 2 + 1
self.linear = nn.Linear(num_features, 2)
self.output = nn.Sequential(
nn.MaxPool1d(2),
nn.ReLU()
)
def forward(self, x1, x2):
""" Run the model.
"""
if self.disconnected:
x = self.linear(x1).unsqueeze(1)
else:
x = self.linear(torch.cat((x1, x2), dim=-1)).unsqueeze(1)
return self.output(x).squeeze(1)
model = Net(num_features, disconnected)
optimizer = torch.optim.Adam(model.parameters())
def train(model, device, train_loader, optimizer, epoch):
model.train()
num_examples = 0
for batch_idx, (data1, data2, target) in enumerate(train_loader):
num_examples += target.shape[0]
data1, data2, target = data1.to(device), data2.to(device), target.to(device)
optimizer.zero_grad()
output = model(data1, data2)
loss = F.mse_loss(output.squeeze(1), target)
loss.backward()
optimizer.step()
if batch_idx % 2 == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader), loss.item()))
device = torch.device('cpu')
train(model, device, loader, optimizer, 1)
next_x1, next_x2, _ = next(iter(loader))
np.random.seed(0)
inds = np.random.choice(next_x1.shape[0], 20, replace=False)
background = [next_x1[inds, :], next_x2[inds, :]]
e = shap.DeepExplainer(model, background)
test_x1, test_x2, _ = next(iter(loader))
shap_x1, shap_x2 = e.shap_values([test_x1[:1], test_x2[:1]])
model.eval()
model.zero_grad()
with torch.no_grad():
diff = (model(test_x1[:1], test_x2[:1]) - model(*background)).detach().numpy().mean(0)
sums = np.array([shap_x1[i].sum() + shap_x2[i].sum() for i in range(len(shap_x1))])
d = np.abs(sums - diff).sum()
assert d / np.abs(diff).sum() < 0.001, "Sum of SHAP values does not match difference! %f" % (d / np.abs(diff).sum())
_run_pytorch_multiple_inputs_test(disconnected=True)
_run_pytorch_multiple_inputs_test(disconnected=False)
|
{
"content_hash": "7e1022a8f436fb84b46805247f0285af",
"timestamp": "",
"source": "github",
"line_count": 604,
"max_line_length": 124,
"avg_line_length": 37.28145695364238,
"alnum_prop": 0.5724753530508926,
"repo_name": "slundberg/shap",
"id": "f5bae4b085caad511fcc9f397fa3d2c8a2386ec0",
"size": "22518",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/explainers/test_deep.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C++",
"bytes": "154976"
},
{
"name": "Cuda",
"bytes": "14595"
},
{
"name": "HTML",
"bytes": "2393"
},
{
"name": "JavaScript",
"bytes": "55236"
},
{
"name": "Jupyter Notebook",
"bytes": "138364033"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "1295917"
}
],
"symlink_target": ""
}
|
from flask_wtf import FlaskForm
from json import loads
from wtforms import StringField, IntegerField, DateTimeField, SelectField
from wtforms.validators import DataRequired, Length, EqualTo
from project.server import db
from project.server.models import Track
class NewScheduleForm(FlaskForm):
name = StringField(
'Name',
validators=[
DataRequired(),
Length(min=6, max=255)
]
)
track_id = SelectField(
'Source',
choices=[(x['id'], x['name'])
for x in loads(Track.all_to_json(db.session))],
validators=[
DataRequired()
]
)
start_time = DateTimeField(
'Start time',
validators=[
DataRequired()
]
)
|
{
"content_hash": "895de26a424865a8b7f8c0998dc1e385",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 73,
"avg_line_length": 24.774193548387096,
"alnum_prop": 0.5924479166666666,
"repo_name": "runozo/palinsesto-fire",
"id": "32ea4f1dd1115a0e9861d329c7eb6410fba194d2",
"size": "801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/server/palinsesto/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "57"
},
{
"name": "HTML",
"bytes": "7864"
},
{
"name": "JavaScript",
"bytes": "347"
},
{
"name": "Python",
"bytes": "30174"
}
],
"symlink_target": ""
}
|
"""
Generic traversal of a tree-structured type
"""
class Traversal(object):
def visit_list(self, xs):
return [self.visit(x) for x in xs]
def visit_tuple(self, xs):
return tuple(self.visit_list(xs))
def visit_generic(self, x):
assert False, \
"Unsupported %s : %s" % (x, x.__class__.__name__)
def visit(self, x):
method_name = 'visit_' + x.__class__.__name__
if hasattr(self, method_name):
method = getattr(self, method_name)
return method(x)
else:
return self.visit_generic(x)
|
{
"content_hash": "457576114a767ab4f7c1a891345e54ef",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 55,
"avg_line_length": 23.125,
"alnum_prop": 0.5891891891891892,
"repo_name": "iskandr/dsltools",
"id": "4f12bc3c7b81e2c659960552c6093e5dcd04c251",
"size": "555",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dsltools/traversal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "10819"
}
],
"symlink_target": ""
}
|
import pytest
from tests.common.test_vector import ImpalaTestDimension
from tests.common.impala_test_suite import ImpalaTestSuite
MT_DOP_VALUES = [0, 1, 2, 8]
class TestParquetStats(ImpalaTestSuite):
"""
This suite tests runtime optimizations based on Parquet statistics.
"""
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestParquetStats, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(ImpalaTestDimension('mt_dop', *MT_DOP_VALUES))
cls.ImpalaTestMatrix.add_constraint(
lambda v: v.get_value('table_format').file_format == 'parquet')
def test_parquet_stats(self, vector, unique_database):
# The test makes assumptions about the number of row groups that are processed and
# skipped inside a fragment, so we ensure that the tests run in a single fragment.
vector.get_value('exec_option')['num_nodes'] = 1
self.run_test_case('QueryTest/parquet_stats', vector, use_db=unique_database)
|
{
"content_hash": "40a078608b1f4ae78afbed394e4ad928",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 86,
"avg_line_length": 36.607142857142854,
"alnum_prop": 0.7375609756097561,
"repo_name": "michaelhkw/incubator-impala",
"id": "9b9d6d77e6ffcd194230105001650147844cdec4",
"size": "1811",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/query_test/test_parquet_stats.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "420966"
},
{
"name": "C++",
"bytes": "9610318"
},
{
"name": "CMake",
"bytes": "164075"
},
{
"name": "CSS",
"bytes": "148115"
},
{
"name": "HTML",
"bytes": "56"
},
{
"name": "Java",
"bytes": "4510111"
},
{
"name": "JavaScript",
"bytes": "1202163"
},
{
"name": "Lex",
"bytes": "23576"
},
{
"name": "Objective-C",
"bytes": "1205"
},
{
"name": "PLpgSQL",
"bytes": "3646"
},
{
"name": "Protocol Buffer",
"bytes": "630"
},
{
"name": "Python",
"bytes": "2666179"
},
{
"name": "Roff",
"bytes": "1633"
},
{
"name": "SQLPL",
"bytes": "1558"
},
{
"name": "Shell",
"bytes": "244435"
},
{
"name": "Thrift",
"bytes": "244539"
}
],
"symlink_target": ""
}
|
import unittest
import os
import sys
import commands
import comm
class TestSampleAppFunctions(unittest.TestCase):
def test_launch(self):
comm.setUp()
app_name = "Helloworld"
cmd = "adb -s " + comm.device + " shell am start -n org.xwalk.%s/.%sActivity" % \
(app_name.lower(), app_name)
comm.app_launch(cmd, self)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "f6d61a5e9a7e7b34d7400aea412d6c06",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 89,
"avg_line_length": 22.833333333333332,
"alnum_prop": 0.6082725060827251,
"repo_name": "XiaosongWei/crosswalk-test-suite",
"id": "fd60f3a11f309f04ec62701c49246e6ce57f6396",
"size": "1959",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "misc/sampleapp-android-tests/sampleapp/helloworld_launch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1693"
},
{
"name": "C",
"bytes": "28136"
},
{
"name": "CSS",
"bytes": "403677"
},
{
"name": "CoffeeScript",
"bytes": "18978"
},
{
"name": "Cucumber",
"bytes": "76562"
},
{
"name": "GLSL",
"bytes": "6990"
},
{
"name": "Groff",
"bytes": "12"
},
{
"name": "HTML",
"bytes": "41078525"
},
{
"name": "Java",
"bytes": "786204"
},
{
"name": "JavaScript",
"bytes": "4639929"
},
{
"name": "Logos",
"bytes": "12"
},
{
"name": "Makefile",
"bytes": "1044"
},
{
"name": "PHP",
"bytes": "45668"
},
{
"name": "Python",
"bytes": "4057992"
},
{
"name": "Shell",
"bytes": "850195"
}
],
"symlink_target": ""
}
|
"""Nova common internal object model"""
import collections
import contextlib
import copy
import datetime
import functools
import traceback
import netaddr
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovoo_base
import six
from nova import context
from nova import exception
from nova.i18n import _, _LE
from nova import objects
from nova.objects import fields as obj_fields
from nova.openstack.common import versionutils
from nova import utils
LOG = logging.getLogger('object')
def get_attrname(name):
"""Return the mangled name of the attribute's underlying storage."""
return '_' + name
def make_class_properties(cls):
# NOTE(danms/comstud): Inherit fields from super classes.
# mro() returns the current class first and returns 'object' last, so
# those can be skipped. Also be careful to not overwrite any fields
# that already exist. And make sure each cls has its own copy of
# fields and that it is not sharing the dict with a super class.
cls.fields = dict(cls.fields)
for supercls in cls.mro()[1:-1]:
if not hasattr(supercls, 'fields'):
continue
for name, field in supercls.fields.items():
if name not in cls.fields:
cls.fields[name] = field
for name, field in cls.fields.iteritems():
if not isinstance(field, obj_fields.Field):
raise exception.ObjectFieldInvalid(
field=name, objname=cls.obj_name())
def getter(self, name=name):
attrname = get_attrname(name)
if not hasattr(self, attrname):
self.obj_load_attr(name)
return getattr(self, attrname)
def setter(self, value, name=name, field=field):
attrname = get_attrname(name)
field_value = field.coerce(self, name, value)
if field.read_only and hasattr(self, attrname):
# Note(yjiang5): _from_db_object() may iterate
# every field and write, no exception in such situation.
if getattr(self, attrname) != field_value:
raise exception.ReadOnlyFieldError(field=name)
else:
return
self._changed_fields.add(name)
try:
return setattr(self, attrname, field_value)
except Exception:
attr = "%s.%s" % (self.obj_name(), name)
LOG.exception(_LE('Error setting %(attr)s'), {'attr': attr})
raise
def deleter(self, name=name):
attrname = get_attrname(name)
if not hasattr(self, attrname):
raise AttributeError('No such attribute `%s' % name)
delattr(self, get_attrname(name))
setattr(cls, name, property(getter, setter, deleter))
class NovaObjectMetaclass(type):
"""Metaclass that allows tracking of object classes."""
# NOTE(danms): This is what controls whether object operations are
# remoted. If this is not None, use it to remote things over RPC.
indirection_api = None
def __init__(cls, names, bases, dict_):
if not hasattr(cls, '_obj_classes'):
# This means this is a base class using the metaclass. I.e.,
# the 'NovaObject' class.
cls._obj_classes = collections.defaultdict(list)
return
def _vers_tuple(obj):
return tuple([int(x) for x in obj.VERSION.split(".")])
# Add the subclass to NovaObject._obj_classes. If the
# same version already exists, replace it. Otherwise,
# keep the list with newest version first.
make_class_properties(cls)
obj_name = cls.obj_name()
for i, obj in enumerate(cls._obj_classes[obj_name]):
if cls.VERSION == obj.VERSION:
cls._obj_classes[obj_name][i] = cls
# Update nova.objects with this newer class.
setattr(objects, obj_name, cls)
break
if _vers_tuple(cls) > _vers_tuple(obj):
# Insert before.
cls._obj_classes[obj_name].insert(i, cls)
if i == 0:
# Later version than we've seen before. Update
# nova.objects.
setattr(objects, obj_name, cls)
break
else:
cls._obj_classes[obj_name].append(cls)
# Either this is the first time we've seen the object or it's
# an older version than anything we'e seen. Update nova.objects
# only if it's the first time we've seen this object name.
if not hasattr(objects, obj_name):
setattr(objects, obj_name, cls)
# These are decorators that mark an object's method as remotable.
# If the metaclass is configured to forward object methods to an
# indirection service, these will result in making an RPC call
# instead of directly calling the implementation in the object. Instead,
# the object implementation on the remote end will perform the
# requested action and the result will be returned here.
def remotable_classmethod(fn):
"""Decorator for remotable classmethods."""
@functools.wraps(fn)
def wrapper(cls, context, *args, **kwargs):
if NovaObject.indirection_api:
result = NovaObject.indirection_api.object_class_action(
context, cls.obj_name(), fn.__name__, cls.VERSION,
args, kwargs)
else:
result = fn(cls, context, *args, **kwargs)
if isinstance(result, NovaObject):
result._context = context
return result
# NOTE(danms): Make this discoverable
wrapper.remotable = True
wrapper.original_fn = fn
return classmethod(wrapper)
# See comment above for remotable_classmethod()
#
# Note that this will use either the provided context, or the one
# stashed in the object. If neither are present, the object is
# "orphaned" and remotable methods cannot be called.
def remotable(fn):
"""Decorator for remotable object methods."""
@functools.wraps(fn)
def wrapper(self, *args, **kwargs):
if args and isinstance(args[0], context.RequestContext):
raise exception.ObjectActionError(
action=fn.__name__,
reason='Calling remotables with context is deprecated')
if self._context is None:
raise exception.OrphanedObjectError(method=fn.__name__,
objtype=self.obj_name())
if NovaObject.indirection_api:
updates, result = NovaObject.indirection_api.object_action(
self._context, self, fn.__name__, args, kwargs)
for key, value in updates.iteritems():
if key in self.fields:
field = self.fields[key]
# NOTE(ndipanov): Since NovaObjectSerializer will have
# deserialized any object fields into objects already,
# we do not try to deserialize them again here.
if isinstance(value, NovaObject):
setattr(self, key, value)
else:
setattr(self, key,
field.from_primitive(self, key, value))
self.obj_reset_changes()
self._changed_fields = set(updates.get('obj_what_changed', []))
return result
else:
return fn(self, *args, **kwargs)
wrapper.remotable = True
wrapper.original_fn = fn
return wrapper
@six.add_metaclass(NovaObjectMetaclass)
class NovaObject(object):
"""Base class and object factory.
This forms the base of all objects that can be remoted or instantiated
via RPC. Simply defining a class that inherits from this base class
will make it remotely instantiatable. Objects should implement the
necessary "get" classmethod routines as well as "save" object methods
as appropriate.
"""
# Object versioning rules
#
# Each service has its set of objects, each with a version attached. When
# a client attempts to call an object method, the server checks to see if
# the version of that object matches (in a compatible way) its object
# implementation. If so, cool, and if not, fail.
#
# This version is allowed to have three parts, X.Y.Z, where the .Z element
# is reserved for stable branch backports. The .Z is ignored for the
# purposes of triggering a backport, which means anything changed under
# a .Z must be additive and non-destructive such that a node that knows
# about X.Y can consider X.Y.Z equivalent.
VERSION = '1.0'
# The fields present in this object as key:field pairs. For example:
#
# fields = { 'foo': fields.IntegerField(),
# 'bar': fields.StringField(),
# }
fields = {}
obj_extra_fields = []
# Table of sub-object versioning information
#
# This contains a list of version mappings, by the field name of
# the subobject. The mappings must be in order of oldest to
# newest, and are tuples of (my_version, subobject_version). A
# request to backport this object to $my_version will cause the
# subobject to be backported to $subobject_version.
#
# obj_relationships = {
# 'subobject1': [('1.2', '1.1'), ('1.4', '1.2')],
# 'subobject2': [('1.2', '1.0')],
# }
#
# In the above example:
#
# - If we are asked to backport our object to version 1.3,
# subobject1 will be backported to version 1.1, since it was
# bumped to version 1.2 when our version was 1.4.
# - If we are asked to backport our object to version 1.5,
# no changes will be made to subobject1 or subobject2, since
# they have not changed since version 1.4.
# - If we are asked to backlevel our object to version 1.1, we
# will remove both subobject1 and subobject2 from the primitive,
# since they were not added until version 1.2.
obj_relationships = {}
def __init__(self, context=None, **kwargs):
self._changed_fields = set()
self._context = context
for key in kwargs.keys():
setattr(self, key, kwargs[key])
def __repr__(self):
return '%s(%s)' % (
self.obj_name(),
','.join(['%s=%s' % (name,
(self.obj_attr_is_set(name) and
field.stringify(getattr(self, name)) or
'<?>'))
for name, field in sorted(self.fields.items())]))
@classmethod
def obj_name(cls):
"""Return a canonical name for this object which will be used over
the wire for remote hydration.
"""
return cls.__name__
@classmethod
def obj_class_from_name(cls, objname, objver):
"""Returns a class from the registry based on a name and version."""
if objname not in cls._obj_classes:
LOG.error(_LE('Unable to instantiate unregistered object type '
'%(objtype)s'), dict(objtype=objname))
raise exception.UnsupportedObjectError(objtype=objname)
# NOTE(comstud): If there's not an exact match, return the highest
# compatible version. The objects stored in the class are sorted
# such that highest version is first, so only set compatible_match
# once below.
compatible_match = None
for objclass in cls._obj_classes[objname]:
if objclass.VERSION == objver:
return objclass
if (not compatible_match and
versionutils.is_compatible(objver, objclass.VERSION)):
compatible_match = objclass
if compatible_match:
return compatible_match
# As mentioned above, latest version is always first in the list.
latest_ver = cls._obj_classes[objname][0].VERSION
raise exception.IncompatibleObjectVersion(objname=objname,
objver=objver,
supported=latest_ver)
@classmethod
def _obj_from_primitive(cls, context, objver, primitive):
self = cls()
self._context = context
self.VERSION = objver
objdata = primitive['nova_object.data']
changes = primitive.get('nova_object.changes', [])
for name, field in self.fields.items():
if name in objdata:
setattr(self, name, field.from_primitive(self, name,
objdata[name]))
self._changed_fields = set([x for x in changes if x in self.fields])
return self
@classmethod
def obj_from_primitive(cls, primitive, context=None):
"""Object field-by-field hydration."""
if primitive['nova_object.namespace'] != 'nova':
# NOTE(danms): We don't do anything with this now, but it's
# there for "the future"
raise exception.UnsupportedObjectError(
objtype='%s.%s' % (primitive['nova_object.namespace'],
primitive['nova_object.name']))
objname = primitive['nova_object.name']
objver = primitive['nova_object.version']
objclass = cls.obj_class_from_name(objname, objver)
return objclass._obj_from_primitive(context, objver, primitive)
def __deepcopy__(self, memo):
"""Efficiently make a deep copy of this object."""
# NOTE(danms): A naive deepcopy would copy more than we need,
# and since we have knowledge of the volatile bits of the
# object, we can be smarter here. Also, nested entities within
# some objects may be uncopyable, so we can avoid those sorts
# of issues by copying only our field data.
nobj = self.__class__()
nobj._context = self._context
for name in self.fields:
if self.obj_attr_is_set(name):
nval = copy.deepcopy(getattr(self, name), memo)
setattr(nobj, name, nval)
nobj._changed_fields = set(self._changed_fields)
return nobj
def obj_clone(self):
"""Create a copy."""
return copy.deepcopy(self)
def obj_calculate_child_version(self, target_version, child):
"""Calculate the appropriate version for a child object.
This is to be used when backporting an object for an older client.
A sub-object will need to be backported to a suitable version for
the client as well, and this method will calculate what that
version should be, based on obj_relationships.
:param target_version: Version this object is being backported to
:param child: The child field for which the appropriate version
is to be calculated
:returns: None if the child should be omitted from the backport,
otherwise, the version to which the child should be
backported
"""
target_version = utils.convert_version_to_tuple(target_version)
for index, versions in enumerate(self.obj_relationships[child]):
my_version, child_version = versions
my_version = utils.convert_version_to_tuple(my_version)
if target_version < my_version:
if index == 0:
# We're backporting to a version from before this
# subobject was added: delete it from the primitive.
return None
else:
# We're in the gap between index-1 and index, so
# backport to the older version
return self.obj_relationships[child][index - 1][1]
elif target_version == my_version:
# This is the first mapping that satisfies the
# target_version request: backport the object.
return child_version
# No need to backport, as far as we know, so return the latest
# version of the sub-object we know about
return self.obj_relationships[child][-1][1]
def _obj_make_obj_compatible(self, primitive, target_version, field):
"""Backlevel a sub-object based on our versioning rules.
This is responsible for backporting objects contained within
this object's primitive according to a set of rules we
maintain about version dependencies between objects. This
requires that the obj_relationships table in this object is
correct and up-to-date.
:param:primitive: The primitive version of this object
:param:target_version: The version string requested for this object
:param:field: The name of the field in this object containing the
sub-object to be backported
"""
def _do_backport(to_version):
obj = getattr(self, field)
if obj is None:
return
if isinstance(obj, NovaObject):
if to_version != primitive[field]['nova_object.version']:
obj.obj_make_compatible(
primitive[field]['nova_object.data'],
to_version)
primitive[field]['nova_object.version'] = to_version
elif isinstance(obj, list):
for i, element in enumerate(obj):
element.obj_make_compatible(
primitive[field][i]['nova_object.data'],
to_version)
primitive[field][i]['nova_object.version'] = to_version
child_version = self.obj_calculate_child_version(target_version, field)
if child_version is None:
del primitive[field]
else:
_do_backport(child_version)
def obj_make_compatible(self, primitive, target_version):
"""Make an object representation compatible with a target version.
This is responsible for taking the primitive representation of
an object and making it suitable for the given target_version.
This may mean converting the format of object attributes, removing
attributes that have been added since the target version, etc. In
general:
- If a new version of an object adds a field, this routine
should remove it for older versions.
- If a new version changed or restricted the format of a field, this
should convert it back to something a client knowing only of the
older version will tolerate.
- If an object that this object depends on is bumped, then this
object should also take a version bump. Then, this routine should
backlevel the dependent object (by calling its obj_make_compatible())
if the requested version of this object is older than the version
where the new dependent object was added.
:param:primitive: The result of self.obj_to_primitive()
:param:target_version: The version string requested by the recipient
of the object
:raises: nova.exception.UnsupportedObjectError if conversion
is not possible for some reason
"""
for key, field in self.fields.items():
if not isinstance(field, (obj_fields.ObjectField,
obj_fields.ListOfObjectsField)):
continue
if not self.obj_attr_is_set(key):
continue
if key not in self.obj_relationships:
# NOTE(danms): This is really a coding error and shouldn't
# happen unless we miss something
raise exception.ObjectActionError(
action='obj_make_compatible',
reason='No rule for %s' % key)
self._obj_make_obj_compatible(primitive, target_version, key)
def obj_to_primitive(self, target_version=None):
"""Simple base-case dehydration.
This calls to_primitive() for each item in fields.
"""
primitive = dict()
for name, field in self.fields.items():
if self.obj_attr_is_set(name):
primitive[name] = field.to_primitive(self, name,
getattr(self, name))
if target_version:
self.obj_make_compatible(primitive, target_version)
obj = {'nova_object.name': self.obj_name(),
'nova_object.namespace': 'nova',
'nova_object.version': target_version or self.VERSION,
'nova_object.data': primitive}
if self.obj_what_changed():
obj['nova_object.changes'] = list(self.obj_what_changed())
return obj
def obj_set_defaults(self, *attrs):
if not attrs:
attrs = [name for name, field in self.fields.items()
if field.default != obj_fields.UnspecifiedDefault]
for attr in attrs:
default = copy.deepcopy(self.fields[attr].default)
if default is obj_fields.UnspecifiedDefault:
raise exception.ObjectActionError(
action='set_defaults',
reason='No default set for field %s' % attr)
if not self.obj_attr_is_set(attr):
setattr(self, attr, default)
def obj_load_attr(self, attrname):
"""Load an additional attribute from the real object.
This should use self._conductor, and cache any data that might
be useful for future load operations.
"""
raise NotImplementedError(
_("Cannot load '%s' in the base class") % attrname)
def save(self, context):
"""Save the changed fields back to the store.
This is optional for subclasses, but is presented here in the base
class for consistency among those that do.
"""
raise NotImplementedError(_('Cannot save anything in the base class'))
def obj_what_changed(self):
"""Returns a set of fields that have been modified."""
changes = set(self._changed_fields)
for field in self.fields:
if (self.obj_attr_is_set(field) and
isinstance(getattr(self, field), NovaObject) and
getattr(self, field).obj_what_changed()):
changes.add(field)
return changes
def obj_get_changes(self):
"""Returns a dict of changed fields and their new values."""
changes = {}
for key in self.obj_what_changed():
changes[key] = getattr(self, key)
return changes
def obj_reset_changes(self, fields=None, recursive=False):
"""Reset the list of fields that have been changed.
:param fields: List of fields to reset, or "all" if None.
:param recursive: Call obj_reset_changes(recursive=True) on
any sub-objects within the list of fields
being reset.
NOTE: This is NOT "revert to previous values"
NOTE: Specifying fields on recursive resets will only be
honored at the top level. Everything below the top
will reset all.
"""
if recursive:
for field in self.obj_get_changes():
# Ignore fields not in requested set (if applicable)
if fields and field not in fields:
continue
# Skip any fields that are unset
if not self.obj_attr_is_set(field):
continue
value = getattr(self, field)
# Don't reset nulled fields
if value is None:
continue
# Reset straight Object and ListOfObjects fields
if isinstance(self.fields[field], obj_fields.ObjectField):
value.obj_reset_changes(recursive=True)
elif isinstance(self.fields[field],
obj_fields.ListOfObjectsField):
for thing in value:
thing.obj_reset_changes(recursive=True)
if fields:
self._changed_fields -= set(fields)
else:
self._changed_fields.clear()
def obj_attr_is_set(self, attrname):
"""Test object to see if attrname is present.
Returns True if the named attribute has a value set, or
False if not. Raises AttributeError if attrname is not
a valid attribute for this object.
"""
if attrname not in self.obj_fields:
raise AttributeError(
_("%(objname)s object has no attribute '%(attrname)s'") %
{'objname': self.obj_name(), 'attrname': attrname})
return hasattr(self, get_attrname(attrname))
@property
def obj_fields(self):
return self.fields.keys() + self.obj_extra_fields
# NOTE(danms): This is nova-specific, so don't copy this to o.vo
@contextlib.contextmanager
def obj_alternate_context(self, context):
original_context = self._context
self._context = context
try:
yield
finally:
self._context = original_context
@contextlib.contextmanager
def obj_as_admin(self):
"""Context manager to make an object call as an admin.
This temporarily modifies the context embedded in an object to
be elevated() and restores it after the call completes. Example
usage:
with obj.obj_as_admin():
obj.save()
"""
if self._context is None:
raise exception.OrphanedObjectError(method='obj_as_admin',
objtype=self.obj_name())
original_context = self._context
self._context = self._context.elevated()
try:
yield
finally:
self._context = original_context
class NovaObjectDictCompat(ovoo_base.VersionedObjectDictCompat):
pass
class NovaTimestampObject(object):
"""Mixin class for db backed objects with timestamp fields.
Sqlalchemy models that inherit from the oslo_db TimestampMixin will include
these fields and the corresponding objects will benefit from this mixin.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
}
class NovaPersistentObject(object):
"""Mixin class for Persistent objects.
This adds the fields that we use in common for most persistent objects.
"""
fields = {
'created_at': obj_fields.DateTimeField(nullable=True),
'updated_at': obj_fields.DateTimeField(nullable=True),
'deleted_at': obj_fields.DateTimeField(nullable=True),
'deleted': obj_fields.BooleanField(default=False),
}
class ObjectListBase(ovoo_base.ObjectListBase):
# NOTE(danms): These are for transition to using the oslo
# base object and can be removed when we move to it.
@classmethod
def _obj_primitive_key(cls, field):
return 'nova_object.%s' % field
@classmethod
def _obj_primitive_field(cls, primitive, field,
default=obj_fields.UnspecifiedDefault):
key = cls._obj_primitive_key(field)
if default == obj_fields.UnspecifiedDefault:
return primitive[key]
else:
return primitive.get(key, default)
class NovaObjectSerializer(messaging.NoOpSerializer):
"""A NovaObject-aware Serializer.
This implements the Oslo Serializer interface and provides the
ability to serialize and deserialize NovaObject entities. Any service
that needs to accept or return NovaObjects as arguments or result values
should pass this to its RPCClient and RPCServer objects.
"""
@property
def conductor(self):
if not hasattr(self, '_conductor'):
from nova import conductor
self._conductor = conductor.API()
return self._conductor
def _process_object(self, context, objprim):
try:
objinst = NovaObject.obj_from_primitive(objprim, context=context)
except exception.IncompatibleObjectVersion as e:
objver = objprim['nova_object.version']
if objver.count('.') == 2:
# NOTE(danms): For our purposes, the .z part of the version
# should be safe to accept without requiring a backport
objprim['nova_object.version'] = \
'.'.join(objver.split('.')[:2])
return self._process_object(context, objprim)
objinst = self.conductor.object_backport(context, objprim,
e.kwargs['supported'])
return objinst
def _process_iterable(self, context, action_fn, values):
"""Process an iterable, taking an action on each value.
:param:context: Request context
:param:action_fn: Action to take on each item in values
:param:values: Iterable container of things to take action on
:returns: A new container of the same type (except set) with
items from values having had action applied.
"""
iterable = values.__class__
if issubclass(iterable, dict):
return iterable(**{k: action_fn(context, v)
for k, v in six.iteritems(values)})
else:
# NOTE(danms, gibi) A set can't have an unhashable value inside,
# such as a dict. Convert the set to list, which is fine, since we
# can't send them over RPC anyway. We convert it to list as this
# way there will be no semantic change between the fake rpc driver
# used in functional test and a normal rpc driver.
if iterable == set:
iterable = list
return iterable([action_fn(context, value) for value in values])
def serialize_entity(self, context, entity):
if isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.serialize_entity,
entity)
elif (hasattr(entity, 'obj_to_primitive') and
callable(entity.obj_to_primitive)):
entity = entity.obj_to_primitive()
return entity
def deserialize_entity(self, context, entity):
if isinstance(entity, dict) and 'nova_object.name' in entity:
entity = self._process_object(context, entity)
elif isinstance(entity, (tuple, list, set, dict)):
entity = self._process_iterable(context, self.deserialize_entity,
entity)
return entity
def obj_to_primitive(obj):
"""Recursively turn an object into a python primitive.
A NovaObject becomes a dict, and anything that implements ObjectListBase
becomes a list.
"""
if isinstance(obj, ObjectListBase):
return [obj_to_primitive(x) for x in obj]
elif isinstance(obj, NovaObject):
result = {}
for key in obj.obj_fields:
if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
result[key] = obj_to_primitive(getattr(obj, key))
return result
elif isinstance(obj, netaddr.IPAddress):
return str(obj)
elif isinstance(obj, netaddr.IPNetwork):
return str(obj)
else:
return obj
def obj_make_list(context, list_obj, item_cls, db_list, **extra_args):
"""Construct an object list from a list of primitives.
This calls item_cls._from_db_object() on each item of db_list, and
adds the resulting object to list_obj.
:param:context: Request context
:param:list_obj: An ObjectListBase object
:param:item_cls: The NovaObject class of the objects within the list
:param:db_list: The list of primitives to convert to objects
:param:extra_args: Extra arguments to pass to _from_db_object()
:returns: list_obj
"""
list_obj.objects = []
for db_item in db_list:
item = item_cls._from_db_object(context, item_cls(), db_item,
**extra_args)
list_obj.objects.append(item)
list_obj._context = context
list_obj.obj_reset_changes()
return list_obj
def serialize_args(fn):
"""Decorator that will do the arguments serialization before remoting."""
def wrapper(obj, *args, **kwargs):
args = [timeutils.strtime(at=arg) if isinstance(arg, datetime.datetime)
else arg for arg in args]
for k, v in kwargs.iteritems():
if k == 'exc_val' and v:
kwargs[k] = str(v)
elif k == 'exc_tb' and v and not isinstance(v, six.string_types):
kwargs[k] = ''.join(traceback.format_tb(v))
elif isinstance(v, datetime.datetime):
kwargs[k] = timeutils.strtime(at=v)
if hasattr(fn, '__call__'):
return fn(obj, *args, **kwargs)
# NOTE(danms): We wrap a descriptor, so use that protocol
return fn.__get__(None, obj)(*args, **kwargs)
# NOTE(danms): Make this discoverable
wrapper.remotable = getattr(fn, 'remotable', False)
wrapper.original_fn = fn
return (functools.wraps(fn)(wrapper) if hasattr(fn, '__call__')
else classmethod(wrapper))
|
{
"content_hash": "8a276422c542186ca89e509247591b4a",
"timestamp": "",
"source": "github",
"line_count": 818,
"max_line_length": 79,
"avg_line_length": 41.12347188264059,
"alnum_prop": 0.5994530158447041,
"repo_name": "bgxavier/nova",
"id": "8018ec5b9a1c33c4a23f5e6d6d7f99ab29886676",
"size": "34244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/objects/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16001553"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "326472"
}
],
"symlink_target": ""
}
|
from django.utils import timezone
from haystack.indexes import SearchIndex, CharField, DateTimeField, Indexable
from .models import Channel
class ChannelIndex(SearchIndex, Indexable):
text = CharField(document=True, use_template=True)
date_available = DateTimeField(model_attr='date_available')
date_update = DateTimeField(model_attr='date_update')
def get_model(self):
return Channel
def get_updated_field(self):
return 'date_update'
def index_queryset(self, using=None):
return Channel.objects.filter(
date_available__lte=timezone.now(),
published=True)
|
{
"content_hash": "e3a865a9db4e9b344915247373dfa77b",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 77,
"avg_line_length": 30.19047619047619,
"alnum_prop": 0.7050473186119873,
"repo_name": "williamroot/opps",
"id": "71e476766cca002258bca5413d9e1c8e46729b9d",
"size": "680",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "opps/channels/search_indexes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "13004"
},
{
"name": "HTML",
"bytes": "56927"
},
{
"name": "JavaScript",
"bytes": "62514"
},
{
"name": "Makefile",
"bytes": "848"
},
{
"name": "Python",
"bytes": "1387220"
},
{
"name": "Shell",
"bytes": "661"
}
],
"symlink_target": ""
}
|
"""Script to parse CNV files
"""
from bigquery_etl.utils import gcutils
from bigquery_etl.extract.gcloud_wrapper import GcsConnector
from bigquery_etl.utils.logging_manager import configure_logging
def parse_cnv(project_id, bucket_name, filename, outfilename, metadata):
"""Download and convert blob into dataframe
Transform the file: includes data cleaning
Add Metadata information
"""
# setup logging
log = configure_logging('cnv', "logs/cnv_transform_" + metadata['AliquotBarcode'] + '.log')
# connect to the cloud bucket
try:
log.info('start transform of %s' % (metadata['AliquotBarcode']))
gcs = GcsConnector(project_id, bucket_name)
#main steps: download, convert to df, cleanup, transform, add metadata
log.info('\tadd changes and metadata for %s' % (metadata['AliquotBarcode']))
data_df = gcutils.convert_blob_to_dataframe(gcs, project_id, bucket_name, filename, log=log)
data_df = additional_changes(data_df)
data_df = add_metadata(data_df, metadata)
# upload the contents of the dataframe in njson format
status = gcs.convert_df_to_njson_and_upload(data_df, outfilename)
log.info('finished transform of %s' % (metadata['AliquotBarcode']))
except Exception as e:
log.exception('problem transforming %s' % (metadata['AliquotBarcode']))
raise e
return status
def additional_changes(data_df):
"""Make additional data transformations on the dataframe
"""
data_df['Segment_Mean'] = data_df['Segment_Mean'].map(lambda x: "{0:.4f}".format(float(x)))
data_df['Num_Probes'] = data_df['Num_Probes'].map(lambda x: str(int(float(x))))
data_df['Start'] = data_df['Start'].map(lambda x: str(int(float(x))))
data_df['End'] = data_df['End'].map(lambda x: str(int(float(x))))
return data_df
def add_metadata(data_df, metadata):
"""Add metadata info to the dataframe
"""
data_df['AliquotBarcode'] = metadata['AliquotBarcode']
data_df['SampleBarcode'] = metadata['SampleBarcode']
data_df['ParticipantBarcode'] = metadata['ParticipantBarcode']
data_df['Study'] = metadata['Study'].upper()
data_df['SampleTypeLetterCode'] = metadata['SampleTypeLetterCode']
data_df['Platform'] = metadata['Platform']
data_df['Pipeline'] = metadata['Pipeline']
data_df['Center'] = metadata['DataCenterName']
return data_df
if __name__ == '__main__':
# project_id = sys.argv[1]
# bucket_name = sys.argv[2]
# filename = sys.argv[3]
# outfilename = sys.argv[4]
# metadata = {'AliquotBarcode':'test', 'SampleBarcode':'t', 'ParticipantBarcode':'t', 'Study':'e', 'SampleTypeLetterCode':'f', 'Platform':'r'}
parse_isoform(project_id, bucket_name, filename, outfilename, metadata)
|
{
"content_hash": "e85c3847713720344d081a0854ee7a29",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 145,
"avg_line_length": 44.317460317460316,
"alnum_prop": 0.666189111747851,
"repo_name": "isb-cgc/ISB-CGC-data-proc",
"id": "da114a031a712813cb67514475eeee1204316312",
"size": "3405",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tcga_etl_pipeline/cnv/transform.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6576"
},
{
"name": "Python",
"bytes": "1169886"
},
{
"name": "Shell",
"bytes": "1068"
}
],
"symlink_target": ""
}
|
import logging
import lldb
log = logging.getLogger('disassembly')
MAX_INSTR_BYTES = 8 # Max number of instruction bytes to show.
NO_SYMBOL_INSTRUCTIONS = 32 # How many instructions to show when there isn't a symbol associated
# with the PC location.
# bisect_left with get_key
def lower_bound(a, x, get_key = lambda x: x):
lo = 0
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if get_key(a[mid]) < x: lo = mid+1
else: hi = mid
return lo
# bisect_right with get_key
def upper_bound(a, x, get_key = lambda x: x):
lo = 0
hi = len(a)
while lo < hi:
mid = (lo+hi)//2
if x < get_key(a[mid]): hi = mid
else: lo = mid+1
return lo
# Find a Dissassembly whose range the address belongs to (assuming a is sorted on start_address)
def find(a, address):
i = upper_bound(a, address, lambda dasm: dasm.start_address) - 1
if i >= 0 and a[i].start_address <= address < a[i].end_address:
return a[i]
else:
return None
# Insert Dissassembly in sorted order
def insert(a, dasm):
i = lower_bound(a, dasm.start_address, lambda dasm: dasm.start_address)
assert i == len(a) or dasm.start_address != a[i].start_address
a.insert(i, dasm)
class Disassembly:
start_sbaddr = None # SBAddress
start_address = None # physical address
end_address = None # physical address
target = None
source_ref = None
def __init__(self, pc_sbaddr, target):
self.target = target
self.symbol = symbol = pc_sbaddr.GetSymbol()
pc_address = pc_sbaddr.GetLoadAddress(self.target)
if symbol.IsValid():
self.start_sbaddr = symbol.GetStartAddress()
self.start_address = self.start_sbaddr.GetLoadAddress(self.target)
self.source_name = '%s @%x' % (symbol.GetName(), self.start_address)
self.instructions = symbol.GetInstructions(self.target)
last_instr = self.instructions[len(self.instructions)-1]
self.end_address = last_instr.GetAddress().GetLoadAddress(self.target) + last_instr.GetByteSize()
if not symbol.IsValid() or not (self.start_address <= pc_address < self.end_address):
# Just read some instructions around the PC location.
self.start_sbaddr = pc_sbaddr
self.start_address = pc_sbaddr.GetLoadAddress(self.target)
self.source_name = "@%x" % self.start_address
self.instructions = self.target.ReadInstructions(pc_sbaddr, NO_SYMBOL_INSTRUCTIONS)
last_instr = self.instructions[len(self.instructions)-1]
self.end_address = last_instr.GetAddress().GetLoadAddress(self.target) + last_instr.GetByteSize()
assert self.start_address <= pc_address < self.end_address
self.addresses = [-1, -1] # addresses corresponding to source lines (-1 = comment)
for instr in self.instructions:
self.addresses.append(instr.GetAddress().GetLoadAddress(self.target))
def line_num_by_address(self, load_addr):
return lower_bound(self.addresses, load_addr) + 1 # lines numbers are 1-based
def address_by_line_num(self, line_num):
return self.addresses[line_num - 1]
def get_source_ref(self):
return { 'name': self.source_name, 'sourceReference': self.source_ref }
def get_source_text(self):
line_entry = self.start_sbaddr.GetLineEntry()
if line_entry.IsValid():
source_location = '%s:%d' % (line_entry.GetFileSpec(), line_entry.GetLine())
else:
source_location = 'unknown'
if self.symbol.IsValid():
desc = lldb.SBStream()
self.symbol.GetDescription(desc)
description = desc.GetData()
else:
description = 'No symbol info'
lines = [
'; %s' % description,
'; Source location: %s' % source_location ]
for instr in self.instructions:
addr = instr.GetAddress().GetLoadAddress(self.target)
dump = ''
for i,b in enumerate(instr.GetData(self.target).uint8):
if i >= MAX_INSTR_BYTES:
dump += '>'
break
dump += '%02X ' % b
dump = dump.ljust(MAX_INSTR_BYTES * 3 + 2)
line = '%08X: %s %-6s %s' % (addr, dump,
instr.GetMnemonic(self.target), instr.GetOperands(self.target))
comment = instr.GetComment(self.target)
if len(comment) > 0:
line += ' ; ' + comment
#line = str(instr)
lines.append(line)
return '\n'.join(lines)
|
{
"content_hash": "73bf390724f547c5e03fa37c9784d859",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 109,
"avg_line_length": 39.559322033898304,
"alnum_prop": 0.5974721508140531,
"repo_name": "NeroProtagonist/vscode-lldb",
"id": "cddb57a3e73db5a8b1013417f15790b4fe306ee1",
"size": "4668",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "adapter/disassembly.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "3684"
},
{
"name": "Python",
"bytes": "99574"
},
{
"name": "Rust",
"bytes": "2078"
},
{
"name": "Shell",
"bytes": "61"
},
{
"name": "TypeScript",
"bytes": "30950"
}
],
"symlink_target": ""
}
|
import pytest
from paleomix.common.layout import Layout, LayoutError
def test_layout__minimal():
layout = Layout({})
assert layout.kwargs == {}
assert list(layout) == []
def test_layout__simple_layout():
layout = Layout({"{root}": "my_file"}, root="/root")
assert layout.kwargs == {"root": "/root"}
assert list(layout) == ["my_file"]
assert layout["my_file"] == "/root"
def test_layout__nested_layout():
layout = Layout(
{
"{root}": {
"a": {"{name}.txt": "deep_file"},
"b": "other_file",
}
},
root="/root",
name="foobar",
)
assert layout.kwargs == {"root": "/root", "name": "foobar"}
assert sorted(layout) == ["deep_file", "other_file"]
assert layout["deep_file"] == "/root/a/foobar.txt"
assert layout["other_file"] == "/root/b"
def test_layout__update():
layout1 = Layout({"{root}": {"{name}": "my_file"}})
layout2 = layout1.update(root="/tmp")
assert layout1.kwargs == {}
assert layout2.kwargs == {"root": "/tmp"}
layout3 = layout2.update(name="foobar")
assert layout2.kwargs == {"root": "/tmp"}
assert layout3.kwargs == {"root": "/tmp", "name": "foobar"}
assert layout3["my_file"] == "/tmp/foobar"
layout4 = layout3.update(root="/home/username")
assert layout3.kwargs == {"root": "/tmp", "name": "foobar"}
assert layout4.kwargs == {"root": "/home/username", "name": "foobar"}
assert layout3["my_file"] == "/tmp/foobar"
assert layout4["my_file"] == "/home/username/foobar"
def test_layout__get_without_updates():
layout1 = Layout({"{root}": {"{name}": "my_file"}})
assert layout1.get("my_file", root="foo", name="bar") == "foo/bar"
assert layout1.kwargs == {}
def test_layout__get_without_updates__override():
layout1 = Layout({"{root}": {"{name}": "my_file"}}, root="bar")
assert layout1.get("my_file", root="foo", name="bar") == "foo/bar"
assert layout1.kwargs == {"root": "bar"}
def test_layout__get_without_updates__partial():
layout1 = Layout({"{root}": {"{name}": "my_file"}}, root="foo")
assert layout1.get("my_file", name="bar") == "foo/bar"
assert layout1.kwargs == {"root": "foo"}
def test_layout__extranous_path_components():
layout = Layout({"{root}": {"{name}": "my_file"}})
assert layout.get("my_file", root="foo/", name="test") == "foo/test"
def test_layout__unnamed_field():
with pytest.raises(LayoutError, match="unnamed field are not allowed in"):
Layout({"{}": "my_file"})
def test_layout__missing_value():
layout = Layout({"{root}": "my_file"})
with pytest.raises(KeyError, match="root"):
layout["my_file"]
def test_layout__unknown_field__in_init():
with pytest.raises(LayoutError, match="unknown key"):
Layout({"{root}": "my_file"}, foobar="/path/to/somewhere")
def test_layout__unknown_field__in_update():
layout = Layout({"{root}": "my_file"})
with pytest.raises(LayoutError, match="unknown key"):
layout.update(foobar="/path/to/somewhere")
def test_layout__non_string_name():
with pytest.raises(LayoutError, match="invalid key 17"):
Layout({17: "my_file"}) # type: ignore
def test_layout__non_string_dict_value():
with pytest.raises(LayoutError, match="invalid value 17"):
Layout({"{root}": 17})
def test_layout__duplicate_path_names():
with pytest.raises(LayoutError, match="'file_1' used multiple times"):
Layout({"foo": "file_1", "{root}": {"zod": "file_1"}})
def test_layout__non_unique_key_field_name():
with pytest.raises(LayoutError, match="'file_1' used as both key and field"):
Layout({"foo": "file_1", "{root}": {"{file_1}": "file_2"}})
def test_layout__get_field():
layout = Layout({"{root}": {"b": "other_file"}}, root="/root")
assert layout.get_field("root") == "/root"
def test_layout__get_missing_field():
layout = Layout({"{root}": {"b": "other_file"}}, root="/root")
with pytest.raises(KeyError):
assert layout.get_field("unknown_key")
|
{
"content_hash": "fc1c4d29620854b7f027e4a351e0f9e7",
"timestamp": "",
"source": "github",
"line_count": 139,
"max_line_length": 81,
"avg_line_length": 29.46043165467626,
"alnum_prop": 0.5914529914529915,
"repo_name": "MikkelSchubert/paleomix",
"id": "a832b73e95d92ccf8914ae0c87ed09523e0132ac",
"size": "4095",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/common_tests/layout_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4289"
},
{
"name": "Python",
"bytes": "1251289"
},
{
"name": "R",
"bytes": "54175"
},
{
"name": "Shell",
"bytes": "3729"
}
],
"symlink_target": ""
}
|
"""
filterAnnotatedSV
~~~~~~~~~~~~~~~~~
:Description: This module will filter calls from the merged file
"""
'''
Created on Mar 17, 2015
Description: This module will filter calls from the merged file
@author: Ronak H Shah
::Inputs::
inputTxt: Filter Text File
outputDir: Output directory
outPrefix: Prefix of the output file
blacklistGenesFile: List of genes that should be eliminated
verbose: Mode
genesToKeepFile: List of genes that should be kept
::Output::
Filtered Output files
'''
import os
import logging
import checkparameters as cp
import re
import coloredlogs
import tempfile
os.environ['MPLCONFIGDIR'] = tempfile.mkdtemp() #So that matplotlib doesnot complain stale file handle
try:
import pandas as pd
except ImportError, e:
print "filterAnnotatedSV: pandas is not installed, please install pandas as it is required to run the mapping."
sys.exit(1)
# Initiate logger
logger = logging.getLogger('iCallSV.FilterDellyCalls')
coloredlogs.install(level='DEBUG')
def run(inputTxt, outputDir, outPrefix, blacklistGenesFile, verbose, genesToKeepFile="somefile.txt"):
"""
This will ``filter sv calls`` from the final merged file.
:param str inputTxt: str for the txt file to be filtered
:param str outputDir: str for the output directory
:param str outputPrefix: str prefix for the output File
:param str blacklistGenesFile: str for the txt file containing blacklisted genes
:param bool verbose: a boolean
:param str genesToKeepFile: str for the txt file containing genes to keep
:return: A str name of final sv file
:rtype: str
"""
cp.checkFile(inputTxt)
cp.checkFile(blacklistGenesFile)
cp.checkDir(outputDir)
cp.checkEmpty(outPrefix, "Prefix for the output file")
if(os.path.isfile(genesToKeepFile)):
logger.info(
"iCallSV::FilterFinalFile: Genes to Keep File Given %s and will be used.",
genesToKeepFile)
keepGenes = [line.strip() for line in open(genesToKeepFile, 'r')]
else:
keepGenes = None
inputDF = pd.read_table(inputTxt, keep_default_na='True')
outputDF = inputDF.copy()
#outputDF = pd.DataFrame(columns=inputDF.columns)
outputFile = os.path.join(outputDir, outPrefix + "_final.txt")
for index, row in inputDF.iterrows():
gene1 = row.loc['Gene1']
gene2 = row.loc['Gene2']
site1 = row.loc['Site1Description']
site2 = row.loc['Site2Description']
# skip IGR records
if("IGR" in site1 and "IGR" in site2):
igrFlag = True
else:
igrFlag = False
# check records from these gene
if(keepGenes):
keepGeneFlag = checkGeneListToKeep(gene1, gene2, keepGenes)
else:
keepGeneFlag = True
# check records from these gene
blacklistGenes = [line.strip() for line in open(blacklistGenesFile, 'r')]
blacklistGeneFlag = checkBlackListGene(gene1, gene2, blacklistGenes)
# skip record occurring within intron
eventInIntronFlag = False
if((gene1 == gene2) and ((igrFlag is False) or (blacklistGeneFlag is False)) and ("Intron" in site1 and "Intron" in site2)):
eventInIntronFlag = checkEventInIntronFlag(gene1, gene2, site1, site2)
else:
pass
if((keepGeneFlag is False) or (igrFlag) or (blacklistGeneFlag) or (eventInIntronFlag)):
if(verbose):
logger.warn(
"iCallSV::FilterFinalFile: Record: gene1:%s; gene2:%s; site1:%s; site2:%s; will be Filtered as keepGeneFlag:%s; IGR:%s; blackListGene:%s; Intronic Event:%s",
gene1,
gene2,
site1,
site2,
str(keepGeneFlag),
str(igrFlag),
str(blacklistGeneFlag),
str(eventInIntronFlag))
outputDF = outputDF.drop(index)
else:
pass
outputDF[['SV_LENGTH', 'Cosmic_Fusion_Counts']] = outputDF[['SV_LENGTH', 'Cosmic_Fusion_Counts']].astype(int)
# Write The Final Output File
outputDF.to_csv(outputFile, sep='\t', index=False)
if(verbose):
logger.info(
"iCallSV::FilterFinalFile: Finished Filtering, Final data written in %s",
outputFile)
return(outputFile)
# Check if the gene is a Keep gene
def checkGeneListToKeep(gene1, gene2, keepGenes):
if((gene1 in keepGenes) or (gene2 in keepGenes)):
kgFlag = True
else:
kgFlag = False
return(kgFlag)
# Check if the gene is a blacklist gene
def checkBlackListGene(gene1, gene2, blacklistGenes):
"""
This will ``check for blacklisted genes``
:param str gene1: str for the name of gene at breakpoint 1
:param str gene2: str for the name of gene at breakpoint 2
:param list blacklistGenes: list containing blacklisted genes
:param str genesToKeepFile: str for the txt file containing genes to keep
:return: A boolean tag indicating True or False
:rtype: bool
"""
if((gene1 in blacklistGenes) or (gene2 in blacklistGenes)):
bgFlag = True
else:
bgFlag = False
return(bgFlag)
# Check if the event is in the intron only and not affecting splicing
def checkEventInIntronFlag(gene1, gene2, site1, site2):
"""
This will ``Check if the event is in the intron only and not affecting
splicing``
:param str gene1: str for the name of gene at breakpoint 1
:param str gene2: str for the name of gene at breakpoint 2
:param str site1: str for the description of site in breakpoint 1
:param str site2: str for the description of site in breakpoint 2
:return: A boolean tag indicating True or False
:rtype: bool
"""
eviFlag = False
if(gene1 == gene2):
(s1A, s1B) = site1.split(":")
(s2A, s2B) = site2.split(":")
(s1a, s1b, s1c, s1d) = s1B.split(" ")
(s2a, s2b, s2c, s2d) = s2B.split(" ")
if(("before" in site1 and "before" in site2) or ("after" in site1 and "after" in site2)):
if(int(s1d) == int(s2d)):
if("bp" in s1a):
s1location = re.findall(r'\d+', s1a)[0]
s2location = re.findall(r'\d+', s2a)[0]
if(int(s1location) < 5 or int(s2location) < 5):
eviFlag = False
else:
eviFlag = True
return(eviFlag)
|
{
"content_hash": "759a2679ba78709beadc499b37a50dea",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 177,
"avg_line_length": 35.33879781420765,
"alnum_prop": 0.6349157259935055,
"repo_name": "rhshah/iCallSV",
"id": "dd8e4b083e72e7f1567464709762e26080e41eef",
"size": "6467",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "iCallSV/filterAnnotatedSV.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "155067"
},
{
"name": "R",
"bytes": "1926"
}
],
"symlink_target": ""
}
|
"""
Wavefront REST API Documentation
<p>The Wavefront REST API enables you to interact with Wavefront servers using standard REST API tools. You can use the REST API to automate commonly executed operations such as automatically tagging sources.</p><p>When you make REST API calls outside the Wavefront REST API documentation you must add the header \"Authorization: Bearer <<API-TOKEN>>\" to your HTTP requests.</p> # noqa: E501
OpenAPI spec version: v2
Contact: chitimba@wavefront.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import wavefront_api_client
from wavefront_api_client.models.response_container_recent_app_map_search import ResponseContainerRecentAppMapSearch # noqa: E501
from wavefront_api_client.rest import ApiException
class TestResponseContainerRecentAppMapSearch(unittest.TestCase):
"""ResponseContainerRecentAppMapSearch unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testResponseContainerRecentAppMapSearch(self):
"""Test ResponseContainerRecentAppMapSearch"""
# FIXME: construct object with mandatory attributes with example values
# model = wavefront_api_client.models.response_container_recent_app_map_search.ResponseContainerRecentAppMapSearch() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "680117b47b8cc8672fe7419d1f030297",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 409,
"avg_line_length": 38.28947368421053,
"alnum_prop": 0.7498281786941581,
"repo_name": "wavefrontHQ/python-client",
"id": "c76f269ce1ef9749fe3313c8e5fc114bf759d045",
"size": "1472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_response_container_recent_app_map_search.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4642252"
},
{
"name": "Shell",
"bytes": "3458"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.