import re
import sys
from collections import defaultdict
from datetime import datetime
from json import loads
from urllib.request import urlopen
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.validators import URLValidator, ValidationError
from django.core.exceptions import ImproperlyConfigured
from django.utils import timezone as tz
from config.utils import get_site_config
from events.models import Event, Ticket
from people.models import User, TShirtSize
from slack.utils import post_notification
def strip_brackets(string):
return re.sub(r"\[.+\]", "", string).strip()
def parse_datetime(string):
if string:
dttm = datetime.strptime(string, "%Y-%m-%d %H:%M:%S")
return tz.make_aware(dttm)
class Command(BaseCommand):
help = "Loads tickets from Entrio"
def add_arguments(self, parser):
config = get_site_config()
parser.add_argument('event_id', nargs='?', type=int, default=config.active_event_id)
def handle(self, *args, **options):
source_url = settings.ENTRIO_VISITORS_URL
event = Event.objects.get(pk=options['event_id'])
if not source_url:
raise ImproperlyConfigured("settings.ENTRIO_VISITORS_URL is not set")
self.validate_url(source_url)
print("Loading data from %s" % source_url)
data = self.fetch_entrio_data(source_url)
print("Loaded %d tickets" % len(data))
created_tickets = []
for item in data:
code, ticket_data = self.parse_ticket_data(item)
ticket, created = Ticket.objects.all_with_revoked().update_or_create(
code=code, event=event, defaults=ticket_data)
if created:
print("Created", ticket)
created_tickets.append(ticket)
if created_tickets:
print("Notifying friends on slack...")
self.notify_slack(event, created_tickets)
print("Done")
def parse_ticket_data(self, item):
custom_fields = self.parse_custom_fields(item)
purchased_at = parse_datetime(item.get('purchase_datetime'))
used_at = parse_datetime(item.get('scanned_datetime'))
# Subscript access (not .get) triggers the defaultdict default '' when the field is missing
twitter = (custom_fields['Twitter handle']
.replace("@", "")
.replace("https://twitter.com/", ""))
email = custom_fields.get('E-mail')
user = User.objects.filter(email=email).first() if email else None
tshirt = custom_fields['T-shirt size'].replace('-', ' ')
tshirt = TShirtSize.objects.get(name=tshirt)
ticket_code = item.get('ticket_code')
country = custom_fields.get('Country')
# Hack to fix an issue caused by choosing the wrong field type on Entrio
if country == 'Croatia':
country = 'HR'
ticket_status = item.get('ticket_status')
if ticket_status == "1":
revoked = False
elif ticket_status == "-1":
revoked = True
else:
raise ValueError(f"Unknown value for ticket_status: `{ticket_status}`")
return ticket_code, {
"email": email,
"user": user,
"first_name": custom_fields.get('First name'),
"last_name": custom_fields.get('Last name'),
"country": country,
"twitter": twitter,
"company": custom_fields.get('Company name'),
"category": item.get('ticket_category'),
"promo_code": item.get('promo_discount_group') or "",
"purchased_at": purchased_at,
"used_at": used_at,
"dietary_preferences": custom_fields.get('Dietary preferences'),
"tshirt_size": tshirt,
"revoked": revoked,
}
def parse_custom_fields(self, item):
custom_fields = defaultdict(lambda: '')
for f in item.get('custom_fields').values():
custom_fields[f['name']] = f['value']
return custom_fields
def validate_url(self, source_url):
try:
validator = URLValidator()
validator(source_url)
except ValidationError:
print(self.style.ERROR("Given source_url is not a valid URL."))
sys.exit(1)
def fetch_entrio_data(self, source_url):
with urlopen(source_url) as f:
return loads(f.read().decode('utf-8'))
def notify_slack(self, event, tickets):
def format_ticket(ticket):
category = strip_brackets(ticket.category)
company = ", {}".format(ticket.company) if ticket.company else ""
return "{}{} [{}]".format(ticket.full_name, company, category)
def title(tickets):
count = len(tickets)
return "{} ticket{} sold".format(count, "s" if count > 1 else "")
def text(tickets):
return "\n".join([format_ticket(t) for t in tickets])
def totals(event):
counts = event.get_ticket_counts_by_category()
lines = ["{}: `{}`".format(strip_brackets(category), count)
for category, count in counts]
return "\n".join(lines)
post_notification(
title(tickets),
text(tickets),
"May they be touched by His Noodly Appendage",
)
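# Usage sketch (illustrative): with standard Django management-command wiring, the command
# name comes from this module's filename (import_tickets.py), so it would typically be run as:
#
#   python manage.py import_tickets        # defaults to config.active_event_id
#   python manage.py import_tickets 42     # load tickets for an explicit event id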
|
{
"content_hash": "a72c361be6891317954dfe0e99a8a676",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 92,
"avg_line_length": 33.459119496855344,
"alnum_prop": 0.5951127819548873,
"repo_name": "WebCampZg/conference-web",
"id": "2f4a80e276b7afa9af0f027fe708780386502128",
"size": "5320",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "events/management/commands/import_tickets.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "131971"
},
{
"name": "JavaScript",
"bytes": "3928"
},
{
"name": "Makefile",
"bytes": "1944"
},
{
"name": "Python",
"bytes": "268738"
},
{
"name": "SCSS",
"bytes": "41619"
}
],
"symlink_target": ""
}
|
"""
lib.enthought.logger.agent
"""
|
{
"content_hash": "c36e20d10c25be49fc8a3164b6c569fc",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 26,
"avg_line_length": 11.666666666666666,
"alnum_prop": 0.6571428571428571,
"repo_name": "enthought/etsproxy",
"id": "985b4f53b0da3419885c71410f3a31e12ea565e3",
"size": "35",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "enthought/logger/agent/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "363714"
}
],
"symlink_target": ""
}
|
import unittest
class Test_get_metadata(unittest.TestCase):
def _callFUT(self, path, metadata_version=None):
from pkginfo.utils import get_metadata
if metadata_version is not None:
return get_metadata(path, metadata_version)
return get_metadata(path)
def _checkMyPackage(self, dist, filename):
self.assertEqual(dist.filename, filename)
self.assertEqual(dist.name, 'mypackage')
self.assertEqual(dist.version, '0.1')
self.assertEqual(dist.keywords, None)
self.assertEqual(list(dist.supported_platforms), [])
def _checkClassifiers(self, dist):
self.assertEqual(list(dist.classifiers),
['Development Status :: 4 - Beta',
'Environment :: Console (Text Based)',
])
def test_w_gztar(self):
import os
d, _ = os.path.split(__file__)
filename = '%s/../../docs/examples/mypackage-0.1.tar.gz' % d
dist = self._callFUT(filename)
self.assertEqual(dist.metadata_version, '1.0')
self._checkMyPackage(dist, filename)
def test_w_gztar_and_metadata_version(self):
import os
d, _ = os.path.split(__file__)
filename = '%s/../../docs/examples/mypackage-0.1.tar.gz' % d
dist = self._callFUT(filename, metadata_version='1.1')
self.assertEqual(dist.metadata_version, '1.1')
self._checkMyPackage(dist, filename)
self._checkClassifiers(dist)
def test_w_bztar(self):
import os
d, _ = os.path.split(__file__)
filename = '%s/../../docs/examples/mypackage-0.1.tar.bz2' % d
dist = self._callFUT(filename)
self.assertEqual(dist.metadata_version, '1.0')
self._checkMyPackage(dist, filename)
def test_w_bztar_and_metadata_version(self):
import os
d, _ = os.path.split(__file__)
filename = '%s/../../docs/examples/mypackage-0.1.tar.bz2' % d
dist = self._callFUT(filename, metadata_version='1.1')
self.assertEqual(dist.metadata_version, '1.1')
self._checkMyPackage(dist, filename)
self._checkClassifiers(dist)
def test_w_zip(self):
import os
d, _ = os.path.split(__file__)
filename = '%s/../../docs/examples/mypackage-0.1.zip' % d
dist = self._callFUT(filename)
self.assertEqual(dist.metadata_version, '1.0')
self._checkMyPackage(dist, filename)
def test_w_zip_and_metadata_version(self):
import os
d, _ = os.path.split(__file__)
filename = '%s/../../docs/examples/mypackage-0.1.zip' % d
dist = self._callFUT(filename, metadata_version='1.1')
self.assertEqual(dist.metadata_version, '1.1')
self._checkMyPackage(dist, filename)
self._checkClassifiers(dist)
def test_w_egg(self):
import os
d, _ = os.path.split(__file__)
filename = '%s/../../docs/examples/mypackage-0.1-py2.6.egg' % d
dist = self._callFUT(filename)
self.assertEqual(dist.metadata_version, '1.0')
self._checkMyPackage(dist, filename)
def test_w_egg_and_metadata_version(self):
import os
d, _ = os.path.split(__file__)
filename = '%s/../../docs/examples/mypackage-0.1-py2.6.egg' % d
dist = self._callFUT(filename, metadata_version='1.1')
self.assertEqual(dist.metadata_version, '1.1')
self._checkMyPackage(dist, filename)
self._checkClassifiers(dist)
def test_w_wheel(self):
import os
d, _ = os.path.split(__file__)
filename = ('%s/../../docs/examples/'
'mypackage-0.1-cp26-none-linux_x86_64.whl') % d
dist = self._callFUT(filename)
self.assertEqual(dist.metadata_version, '2.0')
self._checkMyPackage(dist, filename)
def test_w_wheel_and_metadata_version(self):
import os
d, _ = os.path.split(__file__)
filename = ('%s/../../docs/examples/'
'mypackage-0.1-cp26-none-linux_x86_64.whl') % d
dist = self._callFUT(filename, metadata_version='1.1')
self.assertEqual(dist.metadata_version, '1.1')
self._checkMyPackage(dist, filename)
self._checkClassifiers(dist)
def test_w_module(self):
from pkginfo.tests import _defaultMetadataVersion
EXPECTED = _defaultMetadataVersion()
import pkginfo
from pkginfo.tests import _checkSample
dist = self._callFUT(pkginfo)
self.assertEqual(dist.metadata_version, EXPECTED)
_checkSample(self, dist)
def test_w_module_and_metadata_version(self):
import pkginfo
from pkginfo.tests import _checkSample
from pkginfo.tests import _checkClassifiers
dist = self._callFUT(pkginfo, metadata_version='1.2')
self.assertEqual(dist.metadata_version, '1.2')
_checkSample(self, dist)
_checkClassifiers(self, dist)
def test_w_package_name(self):
from pkginfo.tests import _defaultMetadataVersion
EXPECTED = _defaultMetadataVersion()
from pkginfo.tests import _checkSample
dist = self._callFUT('pkginfo')
self.assertEqual(dist.metadata_version, EXPECTED)
_checkSample(self, dist)
def test_w_package_name_and_metadata_version(self):
from pkginfo.tests import _checkSample
from pkginfo.tests import _checkClassifiers
dist = self._callFUT('pkginfo', metadata_version='1.2')
self.assertEqual(dist.metadata_version, '1.2')
_checkSample(self, dist)
_checkClassifiers(self, dist)
def test_w_directory_no_EGG_INFO(self):
import os
import warnings
dir, name = os.path.split(__file__)
subdir = os.path.join(dir, 'funny')
old_filters = warnings.filters[:]
warnings.filterwarnings('ignore')
try:
dist = self._callFUT(subdir)
self.assertEqual(dist.path, subdir)
self.assertEqual(dist.name, None)
self.assertEqual(dist.version, None)
finally:
warnings.filters[:] = old_filters
def test_w_directory(self):
import os
dir, name = os.path.split(__file__)
subdir = os.path.join(dir, 'silly')
dist = self._callFUT(subdir)
self.assertEqual(dist.metadata_version, '1.0')
self.assertEqual(dist.name, 'silly')
self.assertEqual(dist.version, '0.1')
def test_w_directory_and_metadata_version(self):
import os
dir, name = os.path.split(__file__)
subdir = os.path.join(dir, 'silly')
dist = self._callFUT(subdir, metadata_version='1.2')
self.assertEqual(dist.metadata_version, '1.2')
self.assertEqual(dist.name, 'silly')
self.assertEqual(dist.version, '0.1')
|
{
"content_hash": "27a74c5b4ad504c2d48f5110c0c669da",
"timestamp": "",
"source": "github",
"line_count": 176,
"max_line_length": 71,
"avg_line_length": 38.83522727272727,
"alnum_prop": 0.6027798098024872,
"repo_name": "tnkteja/myhelp",
"id": "bd6f74aa55b526b4e2421609df55d4c0987e373b",
"size": "6835",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "virtualEnvironment/lib/python2.7/site-packages/pkginfo/tests/test_utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "10512"
},
{
"name": "Groff",
"bytes": "28"
},
{
"name": "HTML",
"bytes": "50010"
},
{
"name": "JavaScript",
"bytes": "31592"
},
{
"name": "Python",
"bytes": "2251427"
},
{
"name": "Shell",
"bytes": "3855"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='Profile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('email', models.EmailField(max_length=254, unique=True)),
('created', models.DateField(auto_now_add=True)),
('name', models.CharField(max_length=100)),
('surname', models.CharField(max_length=100, verbose_name='Last Name')),
('slug', models.SlugField()),
('updated', models.DateField(auto_now=True)),
('is_active', models.BooleanField(default=True)),
('is_admin', models.BooleanField(default=False)),
('is_staff', models.BooleanField(default=False)),
('website', models.URLField(blank=True)),
('about', models.TextField(blank=True)),
('groups', models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups')),
('user_permissions', models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions')),
],
options={
'abstract': False,
},
),
]
|
{
"content_hash": "30543fdb76b6ed1b0a6169b60c16a396",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 266,
"avg_line_length": 53.65,
"alnum_prop": 0.603448275862069,
"repo_name": "martinstastny/django-simplestore",
"id": "d6fcedcddc4fc3d0607375b06160b3149aba802d",
"size": "2219",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "simplestore/profiles/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2054"
},
{
"name": "HTML",
"bytes": "19042"
},
{
"name": "JavaScript",
"bytes": "6254"
},
{
"name": "Python",
"bytes": "88550"
}
],
"symlink_target": ""
}
|
from com.ericsson.xn.x.fm.FmCommons.NotifFunc import check_notify_accuracy
from com.ericsson.xn.commons.caseutils import pre_test_case,post_test_case
import os
root_dir = os.path.normpath(os.path.dirname(os.path.abspath(__file__))).split('com' + os.sep + 'ericsson' + os.sep + 'xn' + os.sep + 'x' + os.sep + 'fm' + os.sep + 'TestCases')[0]
notify_mapping_cfg = root_dir + os.sep + 'x' + os.sep + 'fm' + os.sep + 'nbi_mapping' + os.sep + 'hss_new_alarm.cfg'
server_info_cfg = root_dir + os.sep + 'x' + os.sep + 'pm' + os.sep + 'execute_conf.cfg'
ne_info_cfg = root_dir + os.sep + 'x' + os.sep + 'pm' + os.sep + 'nes' + os.sep + 'imshss.cfg'
def check_imshss_nbi_accuracy():
pre_test_case("check_imshss_nbi_accuracy_case","nbi_notify_accuracy")
check_notify_accuracy(ne_info_cfg,server_info_cfg,notify_mapping_cfg)
post_test_case()
|
{
"content_hash": "fd9f868b72a0de2d8b432171f4920192",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 179,
"avg_line_length": 61.285714285714285,
"alnum_prop": 0.6538461538461539,
"repo_name": "lowitty/selenium",
"id": "e620a2dffd60faac3519a2a5f767fcf56bf6c1fb",
"size": "858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "com/ericsson/xn/x/fm/TestCases/case_imshss_nbi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2173161"
}
],
"symlink_target": ""
}
|
"""
Includes the help page of the program, with information about the different flags and options.
Most importantly, it includes examples of how the different flags are used. This page
is implemented iteratively, i.e. when a new feature is implemented, this page is updated
to include info about it.
"""
# Prints help to the screen.
def help():
print("""
Please refer to 'Issues' section in GitHub for any problems you may have.
-- [ Options for Help & Symbols Explained ] --
Options Short/Long | Description | Example
=================== + ================================================= + =============================================
help | Prints help on screen | Intel-One >> help
'|' symbol | Means 'or' | Intel-One >> click 1 | 2 means click 1 or 2
'-' symbol | Used for the short flag | Intel-One >> john smith -g
'--' symbol | Used for the long flag | Intel-One >> john smith --google
-- [ ALL in ONE Options ] --
Options Short/Long | Description | Example
=================== + ================================================= + =============================================
-c, --company | Executes all available searches for companies | Intel-One >> glovdi -c | --company
-d, --domain | Executes all available searches for domains | Intel-One >> glovdi.com -d | --domain
-i, --individual | Executes all available searches for individuals | Intel-One >> john smith -i | --individual
-i and -l | As above including location of an individual | Intel-One >> john smith -i madrid -l
-e, --email | Performs all possible searches using an email | Intel-One >> jsmith@email.com -e | --email
-- Note: The 2 following flags below are executed whenever the above ALL in ONE flags are executed. --
-sm, --socialMedia | Executes all available searches in social media | Intel-One >> john smith -sm | --socialMedia
-s, --search | Executes all available searches in search engines | Intel-One >> glovdi -s | --search
-- [ Options for Social Media ] --
Options Short/Long | Description | Example
=================== + ================================================= + =============================================
-rts, --rtsearch | Returns real time social media search results | Intel-One >> john smith -rts | --rtsearch
-fb, --facebook | Performs facebook search through google | Intel-One >> john smith -fb | --facebook
-ln, --linkedin | Performs linkedin search through google | Intel-One >> john smith -ln | --linkedin
-tw, --twitter | Twitter search plus retrieves tweets and analytics| Intel-One >> john smith -tw | --twitter
-in, --instagram | Performs instagram search, posts and analytics | Intel-One >> john smith -in | --instagram
-pn, --pinterest | Performs pinterest search through google | Intel-One >> john smith -pn | --pinterest
-yt, --youtube | Performs search in youtube for username | Intel-One >> john smith -yt | --youtube
-evs, --extraVid | Performs further search in multiple video engines | Intel-One >> john smith -evs | --extraVid
-tb, --tumblr | Performs tumblr search through google | Intel-One >> john smith -tb | --tumblr
-re, --redit | Performs reddit search and posts search | Intel-One >> john smith -re | --reddit
-ure, --userReddit | Provides insights and statistics on reddit user | Intel-One >> john smith -ure | --userReddit
-bl, --blogs | Searches in blogs about target keyword | Intel-One >> john smith -bl | --blogs
-cd, --code | Performs github & 'nerdy data' search on repos | Intel-One >> setoolkit -cd | --code
-- [ Options for Search Engines ] --
Options Short/Long | Description | Example
=================== + ================================================= + =============================================
-g, --google | Performs normal google search | Intel-One >> john smith -g | --google
-ddg, --ddGo | Performs search in duck duck go search engine | Intel-One >> john smith -ddg | --ddGo
-bd, --baidu | Performs search in chinese engine baidu | Intel-One >> john smith -bd | --baidu
-bg, --bing | Performs search in bing search engine | Intel-One >> john smith -bg | --bing
-qw, --qwant | Performs search in qwant search engine | Intel-One >> john smith -qw | --qwant
-km, --keyword | Performs keyword matching search on given keyword | Intel-One >> john smith -km | --keyword
-cl, --cluster | Performs clustering search for query | Intel-One >> john smith -cl | --cluster
-ex, --exciteNews | Performs search about recent news on search engine| Intel-One >> john smith -ex | --exciteNews
-oa, --oldArticles | Performs search about very old posts about query | Intel-One >> john smith -oa | --oldArticles
-ev, --emailValid | Performs email validity search for target mail | Intel-One >> jsmith@email.com -ev | --emailValid
-rss, --rssFeeds | Performs RSS search in feeds on target keyword | Intel-One >> john smith -rss | --rssFeeds
-- [ Options for People Search Engines ] --
Options Short/Long | Description | Example
=================== + ================================================= + =============================================
-p, --people | Provides links on multiple people search engines | Intel-One >> john smith -p | --people
-p and -l | Performs specific pipl using with location | Intel-One >> john smith -p madrid -l
-- [ Options for Companies ] --
Options Short/Long | Description | Example
=================== + ================================================= + =============================================
-cs, --compSearch | Performs search in multiple companies websites | Intel-One >> glovdi -cs | compSearch
-are, --reports | Provides 2 links with annual reports of companies | Intel-One >> glovdi -are | --reports
-cef, --emailFormat | Returns the email format of the company searched | Intel-One >> glovdi -cef | --emailFormat
-- [ Options for Domains ] --
Options Short/Long | Description | Example
=================== + ================================================= + =============================================
-wh, --whois | Performs whois search in the target domain | Intel-One >> glovdi.com -wh | --whois
-dns, --dnsLookup | Performs a DNS lookup about target domain | Intel-One >> glovdi.com -dns | --dnsLookup
-sc, --scan | Performs a vulnerability scan using asafaweb site | Intel-One >> glovdi.com -sc | --scan
-ar, --archive | Performs search for past versions of website | Intel-One >> glovdi.com -ar | --arch
-bw, --builtWith | Shows with which tools and tech the site is built | Intel-One >> glovdi.com -bw | --builtWith
-rb, --robots | Provides the link including the robots.txt file | Intel-One >> glovdi.com -rb | --robots
-- [ Other Options ] --
Options Short/Long | Description | Example
=================== + ================================================= + =============================================
-sh, --shodan | Performs shodan search for given keyword | Intel-One >> zanussi -sh | --shodan
""")
|
{
"content_hash": "6f141721232bf50facb2cffa1ed6d992",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 126,
"avg_line_length": 76.75454545454545,
"alnum_prop": 0.4939002724150184,
"repo_name": "jkatsioloudes/Who-Dis",
"id": "945731fd1895d4e4839b15d3c17a0d168235fed1",
"size": "8443",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "help.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "431733"
},
{
"name": "Python",
"bytes": "68323"
},
{
"name": "TeX",
"bytes": "202937"
}
],
"symlink_target": ""
}
|
from django import VERSION
if VERSION[0:2]>(1,3):
from django.conf.urls import patterns, url
else:
from django.conf.urls.defaults import patterns, url
from views import UploadFile,ImageManager,RemoteCatchImage,SearchMovie,scrawlUp
urlpatterns = patterns('',
url(r'^ImageUp/(?P<uploadpath>.*)',UploadFile,{'uploadtype':'image'}),
url(r'^FileUp/(?P<uploadpath>.*)',UploadFile,{'uploadtype':'file'}),
url(r'^scrawlUp/(?P<uploadpath>.*)$',scrawlUp),
url(r'^ImageManager/(?P<imagepath>.*)$',ImageManager),
url(r'^RemoteCatchImage/(?P<imagepath>.*)$',RemoteCatchImage),
url(r'^SearchMovie/$',SearchMovie),
)
|
{
"content_hash": "624e6ce3ff4359877432b378f2595054",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 79,
"avg_line_length": 40.625,
"alnum_prop": 0.68,
"repo_name": "cbingos/hongmafund",
"id": "66ac7d587f4508f5f5b3a3e234d5c8f9930e4914",
"size": "665",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DjangoUeditor/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "586858"
},
{
"name": "JavaScript",
"bytes": "3683726"
},
{
"name": "Python",
"bytes": "882053"
}
],
"symlink_target": ""
}
|
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class sherlockholmescoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = sherlockholmescoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 9332
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
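# Illustrative CONFIG-FILE contents (any key=value lines are accepted and stored in
# `settings`; the rpcuser/rpcpass values below are placeholders):
#
#   host=127.0.0.1
#   port=9332
#   threads=1
#   hashmeter=0
#   scantime=30
#   rpcuser=exampleuser
#   rpcpass=examplepass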
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
{
"content_hash": "682b27aea06ec790a326f506831882d9",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 84,
"avg_line_length": 25.50204081632653,
"alnum_prop": 0.6464468629961587,
"repo_name": "brishtiteveja/sherlockholmescoin",
"id": "e8bc3050f0d05d0aed6e87c24eaea2cbdf45231c",
"size": "6467",
"binary": false,
"copies": "1",
"ref": "refs/heads/lite_to_sherlock",
"path": "contrib/pyminer/pyminer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "32203"
},
{
"name": "C++",
"bytes": "2625891"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "18955"
},
{
"name": "HTML",
"bytes": "50615"
},
{
"name": "Makefile",
"bytes": "14845"
},
{
"name": "NSIS",
"bytes": "6388"
},
{
"name": "Objective-C",
"bytes": "1063"
},
{
"name": "Objective-C++",
"bytes": "5864"
},
{
"name": "OpenEdge ABL",
"bytes": "161295"
},
{
"name": "Python",
"bytes": "70435"
},
{
"name": "QMake",
"bytes": "29717"
},
{
"name": "Shell",
"bytes": "13448"
}
],
"symlink_target": ""
}
|
import unittest
from testbin import TestBin
class TestBinDc(TestBin, unittest.TestCase):
def setUp(self):
self.bin = 'testdc'
def tearDown(self):
pass
|
{
"content_hash": "7f8a69dc71a163f61c6f91919238829d",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 44,
"avg_line_length": 17.8,
"alnum_prop": 0.6685393258426966,
"repo_name": "ow2-compatibleone/accords-platform",
"id": "e048426c1712761e42a1fe194c89632526344410",
"size": "207",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "testsuite/basic/testdc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3871076"
},
{
"name": "C++",
"bytes": "165729"
},
{
"name": "Java",
"bytes": "158712"
},
{
"name": "Objective-C",
"bytes": "425"
},
{
"name": "PHP",
"bytes": "96003"
},
{
"name": "Perl",
"bytes": "182"
},
{
"name": "Python",
"bytes": "180008"
},
{
"name": "Shell",
"bytes": "128402"
}
],
"symlink_target": ""
}
|
class Destination:
"""
The Destination class can be seen as a simple structure which holds an IP or domain address and a port.
Address and port combinations are often delivered as a tuple or array, but a Destination object is
a clearer way to represent the address/port combination. The Destination class is used to communicate
target and proxy addresses to AttackMethods and Proxies.
"""
def __init__(self, address, port):
"""
Creates a new Destination instance
:param address: IP/Domain (string)
:param port: Port (int or string)
"""
self._address = address
try:
self._port = int(port)
except ValueError:
raise ValueError("Could not convert the port to an integer value.")
@property
def address(self):
"""
Get the address
:return: The address (string)
"""
return self._address
@property
def port(self):
"""
Get the port
:return: The port (int)
"""
return self._port
def __str__(self):
return "Destination with an address of " + self._address + " and a port of " + str(self._port)
|
{
"content_hash": "589a2a06742b28be4fb34cb862d69594",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 110,
"avg_line_length": 30.609756097560975,
"alnum_prop": 0.5784860557768924,
"repo_name": "Valentijn1995/Kn0ckKn0ck",
"id": "7b0762a1c52c536f1d4c2d5d5483d863b869e3ba",
"size": "1255",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Proxies/Destination.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7266"
},
{
"name": "Makefile",
"bytes": "7681"
},
{
"name": "Python",
"bytes": "76482"
}
],
"symlink_target": ""
}
|
import scout
import sys
scout.ScoutCore.load_all_modules()
def suppress_keyboard_interrupt_message():
old_excepthook = sys.excepthook
def new_hook(type, value, traceback):
if type != KeyboardInterrupt:
old_excepthook(type, value, traceback)
else:
pass
sys.excepthook = new_hook
suppress_keyboard_interrupt_message()
ret = scout.ScoutCore.run()
if ret is not None:
(empty, result) = ret
if empty:
sys.exit(3)
else:
print(result, end=' ')
sys.exit(0)
else:
sys.exit(4)
|
{
"content_hash": "45acff04be7a339bb8d2e330b84d7f1b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 50,
"avg_line_length": 18.766666666666666,
"alnum_prop": 0.6252220248667851,
"repo_name": "openSUSE/scout",
"id": "5b669f273d329a35d536d0218240d4938d71b30d",
"size": "673",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scout-cmd.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73200"
},
{
"name": "Shell",
"bytes": "12300"
}
],
"symlink_target": ""
}
|
import os
import tempfile
import unittest
import logging
from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.simulation_parameters import ZoneAirHeatBalanceAlgorithm
log = logging.getLogger(__name__)
class TestZoneAirHeatBalanceAlgorithm(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_zoneairheatbalancealgorithm(self):
pyidf.validation_level = ValidationLevel.error
obj = ZoneAirHeatBalanceAlgorithm()
# alpha
var_algorithm = "ThirdOrderBackwardDifference"
obj.algorithm = var_algorithm
idf = IDF()
idf.add(obj)
idf.save(self.path, check=False)
with open(self.path, mode='r') as f:
for line in f:
log.debug(line.strip())
idf2 = IDF(self.path)
self.assertEqual(idf2.zoneairheatbalancealgorithms[0].algorithm, var_algorithm)
|
{
"content_hash": "76ef0628134b8f1fb535e46f3dfb29b9",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 87,
"avg_line_length": 26.210526315789473,
"alnum_prop": 0.6807228915662651,
"repo_name": "rbuffat/pyidf",
"id": "7acebe539bbcb1a770266204e70d8834f88bfe6b",
"size": "996",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_zoneairheatbalancealgorithm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "22271673"
}
],
"symlink_target": ""
}
|
"""
Tests for L{twisted.internet.protocol}.
"""
from __future__ import division, absolute_import
from zope.interface.verify import verifyObject
from zope.interface import implementer
from twisted.python.failure import Failure
from twisted.internet.interfaces import (IProtocol, ILoggingContext,
IProtocolFactory, IConsumer)
from twisted.internet.defer import CancelledError
from twisted.internet.protocol import (
Protocol, ClientCreator, Factory, ProtocolToConsumerAdapter,
ConsumerToProtocolAdapter)
from twisted.internet.task import Clock
from twisted.trial.unittest import TestCase
from twisted.test.proto_helpers import MemoryReactor, StringTransport
class MemoryConnector:
_disconnected = False
def disconnect(self):
self._disconnected = True
class MemoryReactorWithConnectorsAndTime(MemoryReactor, Clock):
"""
An extension of L{MemoryReactor} which returns L{IConnector}
providers from its C{connectTCP} method.
"""
def __init__(self):
MemoryReactor.__init__(self)
Clock.__init__(self)
self.connectors = []
def connectTCP(self, *a, **kw):
MemoryReactor.connectTCP(self, *a, **kw)
connector = MemoryConnector()
self.connectors.append(connector)
return connector
def connectUNIX(self, *a, **kw):
MemoryReactor.connectUNIX(self, *a, **kw)
connector = MemoryConnector()
self.connectors.append(connector)
return connector
def connectSSL(self, *a, **kw):
MemoryReactor.connectSSL(self, *a, **kw)
connector = MemoryConnector()
self.connectors.append(connector)
return connector
class ClientCreatorTests(TestCase):
"""
Tests for L{twisted.internet.protocol.ClientCreator}.
"""
def _basicConnectTest(self, check):
"""
Helper for implementing a test to verify that one of the I{connect}
methods of L{ClientCreator} passes the right arguments to the right
reactor method.
@param check: A function which will be invoked with a reactor and a
L{ClientCreator} instance and which should call one of the
L{ClientCreator}'s I{connect} methods and assert that all of its
arguments except for the factory are passed on as expected to the
reactor. The factory should be returned.
"""
class SomeProtocol(Protocol):
pass
reactor = MemoryReactorWithConnectorsAndTime()
cc = ClientCreator(reactor, SomeProtocol)
factory = check(reactor, cc)
protocol = factory.buildProtocol(None)
self.assertIsInstance(protocol, SomeProtocol)
def test_connectTCP(self):
"""
L{ClientCreator.connectTCP} calls C{reactor.connectTCP} with the host
and port information passed to it, and with a factory which will
construct the protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
cc.connectTCP('example.com', 1234, 4321, ('1.2.3.4', 9876))
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
self.assertEqual(host, 'example.com')
self.assertEqual(port, 1234)
self.assertEqual(timeout, 4321)
self.assertEqual(bindAddress, ('1.2.3.4', 9876))
return factory
self._basicConnectTest(check)
def test_connectUNIX(self):
"""
L{ClientCreator.connectUNIX} calls C{reactor.connectUNIX} with the
filename passed to it, and with a factory which will construct the
protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
cc.connectUNIX('/foo/bar', 123, True)
address, factory, timeout, checkPID = reactor.unixClients.pop()
self.assertEqual(address, '/foo/bar')
self.assertEqual(timeout, 123)
self.assertEqual(checkPID, True)
return factory
self._basicConnectTest(check)
def test_connectSSL(self):
"""
L{ClientCreator.connectSSL} calls C{reactor.connectSSL} with the host,
port, and context factory passed to it, and with a factory which will
construct the protocol passed to L{ClientCreator.__init__}.
"""
def check(reactor, cc):
expectedContextFactory = object()
cc.connectSSL('example.com', 1234, expectedContextFactory, 4321, ('4.3.2.1', 5678))
host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
self.assertEqual(host, 'example.com')
self.assertEqual(port, 1234)
self.assertIdentical(contextFactory, expectedContextFactory)
self.assertEqual(timeout, 4321)
self.assertEqual(bindAddress, ('4.3.2.1', 5678))
return factory
self._basicConnectTest(check)
def _cancelConnectTest(self, connect):
"""
Helper for implementing a test to verify that cancellation of the
L{Deferred} returned by one of L{ClientCreator}'s I{connect} methods is
implemented to cancel the underlying connector.
@param connect: A function which will be invoked with a L{ClientCreator}
instance as an argument and which should call one its I{connect}
methods and return the result.
@return: A L{Deferred} which fires when the test is complete or fails if
there is a problem.
"""
reactor = MemoryReactorWithConnectorsAndTime()
cc = ClientCreator(reactor, Protocol)
d = connect(cc)
connector = reactor.connectors.pop()
self.assertFalse(connector._disconnected)
d.cancel()
self.assertTrue(connector._disconnected)
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCP(self):
"""
The L{Deferred} returned by L{ClientCreator.connectTCP} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectTCP('example.com', 1234)
return self._cancelConnectTest(connect)
def test_cancelConnectUNIX(self):
"""
The L{Deferred} returned by L{ClientCreator.connectUNIX} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectUNIX('/foo/bar')
return self._cancelConnectTest(connect)
def test_cancelConnectSSL(self):
"""
The L{Deferred} returned by L{ClientCreator.connectSSL} can be cancelled
to abort the connection attempt before it completes.
"""
def connect(cc):
return cc.connectSSL('example.com', 1234, object())
return self._cancelConnectTest(connect)
def _cancelConnectTimeoutTest(self, connect):
"""
Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
cancelled after the connection is set up but before it is fired with the
resulting protocol instance.
"""
reactor = MemoryReactorWithConnectorsAndTime()
cc = ClientCreator(reactor, Protocol)
d = connect(reactor, cc)
connector = reactor.connectors.pop()
# Sanity check - there is an outstanding delayed call to fire the
# Deferred.
self.assertEqual(len(reactor.getDelayedCalls()), 1)
# Cancel the Deferred, disconnecting the transport just set up and
# cancelling the delayed call.
d.cancel()
self.assertEqual(reactor.getDelayedCalls(), [])
# A real connector implementation is responsible for disconnecting the
# transport as well. For our purposes, just check that someone told the
# connector to disconnect.
self.assertTrue(connector._disconnected)
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCPTimeout(self):
"""
L{ClientCreator.connectTCP} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectTCP('example.com', 1234)
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def test_cancelConnectUNIXTimeout(self):
"""
L{ClientCreator.connectUNIX} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectUNIX('/foo/bar')
address, factory, timeout, bindAddress = reactor.unixClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def test_cancelConnectSSLTimeout(self):
"""
L{ClientCreator.connectSSL} inserts a very short delayed call between
the time the connection is established and the time the L{Deferred}
returned from one of its connect methods actually fires. If the
L{Deferred} is cancelled in this interval, the established connection is
closed, the timeout is cancelled, and the L{Deferred} fails with
L{CancelledError}.
"""
def connect(reactor, cc):
d = cc.connectSSL('example.com', 1234, object())
host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
protocol = factory.buildProtocol(None)
transport = StringTransport()
protocol.makeConnection(transport)
return d
return self._cancelConnectTimeoutTest(connect)
def _cancelConnectFailedTimeoutTest(self, connect):
"""
Like L{_cancelConnectTest}, but for the case where the L{Deferred} is
cancelled after the connection attempt has failed but before it is fired
with the resulting failure.
"""
reactor = MemoryReactorWithConnectorsAndTime()
cc = ClientCreator(reactor, Protocol)
d, factory = connect(reactor, cc)
connector = reactor.connectors.pop()
factory.clientConnectionFailed(
connector, Failure(Exception("Simulated failure")))
# Sanity check - there is an outstanding delayed call to fire the
# Deferred.
self.assertEqual(len(reactor.getDelayedCalls()), 1)
# Cancel the Deferred, cancelling the delayed call.
d.cancel()
self.assertEqual(reactor.getDelayedCalls(), [])
return self.assertFailure(d, CancelledError)
def test_cancelConnectTCPFailedTimeout(self):
"""
Similar to L{test_cancelConnectTCPTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectTCP('example.com', 1234)
host, port, factory, timeout, bindAddress = reactor.tcpClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
def test_cancelConnectUNIXFailedTimeout(self):
"""
Similar to L{test_cancelConnectUNIXTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectUNIX('/foo/bar')
address, factory, timeout, bindAddress = reactor.unixClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
def test_cancelConnectSSLFailedTimeout(self):
"""
Similar to L{test_cancelConnectSSLTimeout}, but for the case where the
connection attempt fails.
"""
def connect(reactor, cc):
d = cc.connectSSL('example.com', 1234, object())
host, port, factory, contextFactory, timeout, bindAddress = reactor.sslClients.pop()
return d, factory
return self._cancelConnectFailedTimeoutTest(connect)
class ProtocolTests(TestCase):
"""
Tests for L{twisted.internet.protocol.Protocol}.
"""
def test_interfaces(self):
"""
L{Protocol} instances provide L{IProtocol} and L{ILoggingContext}.
"""
proto = Protocol()
self.assertTrue(verifyObject(IProtocol, proto))
self.assertTrue(verifyObject(ILoggingContext, proto))
def test_logPrefix(self):
"""
L{Protocol.logPrefix} returns the protocol class's name.
"""
class SomeThing(Protocol):
pass
self.assertEqual("SomeThing", SomeThing().logPrefix())
def test_makeConnection(self):
"""
L{Protocol.makeConnection} sets the given transport on itself, and
then calls C{connectionMade}.
"""
result = []
class SomeProtocol(Protocol):
def connectionMade(self):
result.append(self.transport)
transport = object()
protocol = SomeProtocol()
protocol.makeConnection(transport)
self.assertEqual(result, [transport])
class FactoryTests(TestCase):
"""
Tests for L{protocol.Factory}.
"""
def test_interfaces(self):
"""
L{Factory} instances provide both L{IProtocolFactory} and
L{ILoggingContext}.
"""
factory = Factory()
self.assertTrue(verifyObject(IProtocolFactory, factory))
self.assertTrue(verifyObject(ILoggingContext, factory))
def test_logPrefix(self):
"""
L{Factory.logPrefix} returns the name of the factory class.
"""
class SomeKindOfFactory(Factory):
pass
self.assertEqual("SomeKindOfFactory", SomeKindOfFactory().logPrefix())
def test_defaultBuildProtocol(self):
"""
L{Factory.buildProtocol} by default constructs a protocol by
calling its C{protocol} attribute, and attaches the factory to the
result.
"""
class SomeProtocol(Protocol):
pass
f = Factory()
f.protocol = SomeProtocol
protocol = f.buildProtocol(None)
self.assertIsInstance(protocol, SomeProtocol)
self.assertIdentical(protocol.factory, f)
class AdapterTests(TestCase):
"""
Tests for L{ProtocolToConsumerAdapter} and L{ConsumerToProtocolAdapter}.
"""
def test_protocolToConsumer(self):
"""
L{IProtocol} providers can be adapted to L{IConsumer} providers using
L{ProtocolToConsumerAdapter}.
"""
result = []
p = Protocol()
p.dataReceived = result.append
consumer = IConsumer(p)
consumer.write(b"hello")
self.assertEqual(result, [b"hello"])
self.assertIsInstance(consumer, ProtocolToConsumerAdapter)
def test_consumerToProtocol(self):
"""
L{IConsumer} providers can be adapted to L{IProtocol} providers using
L{ConsumerToProtocolAdapter}.
"""
result = []
@implementer(IConsumer)
class Consumer(object):
def write(self, d):
result.append(d)
c = Consumer()
protocol = IProtocol(c)
protocol.dataReceived(b"hello")
self.assertEqual(result, [b"hello"])
self.assertIsInstance(protocol, ConsumerToProtocolAdapter)
|
{
"content_hash": "7b91961e3136f0901ca975c9c51fbba1",
"timestamp": "",
"source": "github",
"line_count": 454,
"max_line_length": 96,
"avg_line_length": 35.62114537444934,
"alnum_prop": 0.6406134058867178,
"repo_name": "biddisco/VTK",
"id": "8d7108b16cfa1bf23996375fc66252f7226af555",
"size": "16245",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "ThirdParty/Twisted/twisted/internet/test/test_protocol.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "37444"
},
{
"name": "C",
"bytes": "45542302"
},
{
"name": "C++",
"bytes": "60467840"
},
{
"name": "CSS",
"bytes": "157961"
},
{
"name": "Cuda",
"bytes": "28721"
},
{
"name": "GAP",
"bytes": "14120"
},
{
"name": "IDL",
"bytes": "4406"
},
{
"name": "Java",
"bytes": "184678"
},
{
"name": "JavaScript",
"bytes": "978324"
},
{
"name": "Objective-C",
"bytes": "121232"
},
{
"name": "Objective-C++",
"bytes": "101052"
},
{
"name": "Pascal",
"bytes": "3255"
},
{
"name": "Perl",
"bytes": "177007"
},
{
"name": "Python",
"bytes": "13262355"
},
{
"name": "Shell",
"bytes": "41929"
},
{
"name": "Tcl",
"bytes": "1894036"
}
],
"symlink_target": ""
}
|
import datetime
import json
import re
from six.moves import urllib
import httmock
import requests
import six
from girder.constants import SettingKey
from girder.exceptions import ValidationException
from girder.models.setting import Setting
from girder.models.token import Token
from girder.models.user import User
from tests import base
from girder_oauth.constants import PluginSettings
from girder_oauth.providers.base import ProviderBase
from girder_oauth.providers.google import Google
def setUpModule():
base.enabledPlugins.append('oauth')
base.startServer()
def tearDownModule():
base.stopServer()
class OauthTest(base.TestCase):
def setUp(self):
base.TestCase.setUp(self)
self.adminUser = User().createUser(
email='rocky@phila.pa.us',
login='rocky',
firstName='Robert',
lastName='Balboa',
password='adrian',
admin=True
)
# Specifies which test account (typically 'new' or 'existing') a
# redirect to a provider will simulate authentication for
self.accountType = None
def testDeriveLogin(self):
"""
Unit tests the _deriveLogin method of the provider classes.
"""
login = ProviderBase._deriveLogin('1234@mail.com', 'John', 'Doe')
self.assertEqual(login, 'johndoe')
login = ProviderBase._deriveLogin('hello#world#foo@mail.com', 'A', 'B')
self.assertEqual(login, 'helloworldfoo')
login = ProviderBase._deriveLogin('hello.world@mail.com', 'A', 'B', 'user2')
self.assertEqual(login, 'user2')
# This should conflict with the saved admin user
login = ProviderBase._deriveLogin('rocky@phila.pa.us', 'Robert', 'Balboa', 'rocky')
self.assertEqual(login, 'rocky1')
def _testSettings(self, providerInfo):
Setting().set(SettingKey.REGISTRATION_POLICY, 'closed')
self.accountType = 'new'
# We should get an empty listing when no providers are set up
params = {
'key': PluginSettings.PROVIDERS_ENABLED,
'value': []
}
resp = self.request('/system/setting', user=self.adminUser, method='PUT', params=params)
self.assertStatusOk(resp)
resp = self.request('/oauth/provider', exception=True, params={
'redirect': 'http://localhost/#foo/bar',
'list': True
})
self.assertStatusOk(resp)
self.assertFalse(resp.json)
# Turn on provider, but don't set other settings
params = {
'list': json.dumps([{
'key': PluginSettings.PROVIDERS_ENABLED,
'value': [providerInfo['id']]
}])
}
resp = self.request('/system/setting', user=self.adminUser, method='PUT', params=params)
self.assertStatusOk(resp)
resp = self.request('/oauth/provider', exception=True, params={
'redirect': 'http://localhost/#foo/bar'
})
self.assertStatus(resp, 500)
# Set up provider normally
params = {
'list': json.dumps([
{
'key': PluginSettings.PROVIDERS_ENABLED,
'value': [providerInfo['id']]
}, {
'key': providerInfo['client_id']['key'],
'value': providerInfo['client_id']['value']
}, {
'key': providerInfo['client_secret']['key'],
'value': providerInfo['client_secret']['value']
}
])
}
resp = self.request('/system/setting', user=self.adminUser, method='PUT', params=params)
self.assertStatusOk(resp)
# No need to re-fetch and test all of these settings values; they will
# be implicitly tested later
def _testOauthTokenAsParam(self, providerInfo):
self.accountType = 'existing'
def _getCallbackParams(providerInfo, redirect):
resp = self.request('/oauth/provider', params={
'redirect': redirect,
'list': True
})
self.assertStatusOk(resp)
providerResp = resp.json[0]
resp = requests.get(providerResp['url'], allow_redirects=False)
self.assertEqual(resp.status_code, 302)
callbackLoc = urllib.parse.urlparse(resp.headers['location'])
self.assertEqual(
callbackLoc.path, r'/api/v1/oauth/%s/callback' % providerInfo['id'])
callbackLocQuery = urllib.parse.parse_qs(callbackLoc.query)
self.assertNotHasKeys(callbackLocQuery, ('error',))
callbackParams = {
key: val[0] for key, val in six.viewitems(callbackLocQuery)
}
return callbackParams
redirect = 'http://localhost/#foo/bar?token={girderToken}'
params = _getCallbackParams(providerInfo, redirect)
resp = self.request(
'/oauth/%s/callback' % providerInfo['id'], params=params, isJson=False)
self.assertStatus(resp, 303)
self.assertTrue('girderToken' in resp.cookie)
self.assertEqual(
resp.headers['Location'],
redirect.format(girderToken=resp.cookie['girderToken'].value))
redirect = 'http://localhost/#foo/bar?token={foobar}'
params = _getCallbackParams(providerInfo, redirect)
resp = self.request(
'/oauth/%s/callback' % providerInfo['id'], params=params, isJson=False)
self.assertStatus(resp, 303)
self.assertTrue('girderToken' in resp.cookie)
self.assertEqual(resp.headers['Location'], redirect)
def _testOauth(self, providerInfo):
# Close registration to start off, and simulate a new user
self._testSettings(providerInfo)
# Make sure that if no list param is passed, we receive the old format
resp = self.request('/oauth/provider', params={
'redirect': 'http://localhost/#foo/bar'
})
self.assertStatusOk(resp)
self.assertIsInstance(resp.json, dict)
self.assertEqual(len(resp.json), 1)
self.assertIn(providerInfo['name'], resp.json)
six.assertRegex(self, resp.json[providerInfo['name']], providerInfo['url_re'])
# This will need to be called several times, to get fresh tokens
def getProviderResp():
resp = self.request('/oauth/provider', params={
'redirect': 'http://localhost/#foo/bar',
'list': True
})
self.assertStatusOk(resp)
self.assertIsInstance(resp.json, list)
self.assertEqual(len(resp.json), 1)
providerResp = resp.json[0]
self.assertSetEqual(set(six.viewkeys(providerResp)), {'id', 'name', 'url'})
self.assertEqual(providerResp['id'], providerInfo['id'])
self.assertEqual(providerResp['name'], providerInfo['name'])
six.assertRegex(self, providerResp['url'], providerInfo['url_re'])
redirectParams = urllib.parse.parse_qs(
urllib.parse.urlparse(providerResp['url']).query)
csrfTokenParts = redirectParams['state'][0].partition('.')
token = Token().load(csrfTokenParts[0], force=True, objectId=False)
self.assertLess(
token['expires'],
datetime.datetime.utcnow() + datetime.timedelta(days=0.30))
self.assertEqual(csrfTokenParts[2], 'http://localhost/#foo/bar')
return providerResp
# Try the new format listing
getProviderResp()
# Try callback, for a nonexistent provider
resp = self.request('/oauth/foobar/callback')
self.assertStatus(resp, 400)
# Try callback, without providing any params
resp = self.request('/oauth/%s/callback' % providerInfo['id'])
self.assertStatus(resp, 400)
# Try callback, providing params as though the provider failed
resp = self.request(
'/oauth/%s/callback' % providerInfo['id'],
params={
'code': None,
'error': 'some_custom_error',
}, exception=True)
self.assertStatus(resp, 502)
self.assertEqual(resp.json['message'], "Provider returned error: 'some_custom_error'.")
# This will need to be called several times, to use fresh tokens
def getCallbackParams(providerResp):
resp = requests.get(providerResp['url'], allow_redirects=False)
self.assertEqual(resp.status_code, 302)
callbackLoc = urllib.parse.urlparse(resp.headers['location'])
self.assertEqual(
callbackLoc.path, r'/api/v1/oauth/%s/callback' % providerInfo['id'])
callbackLocQuery = urllib.parse.parse_qs(callbackLoc.query)
self.assertNotHasKeys(callbackLocQuery, ('error',))
callbackParams = {
key: val[0] for key, val in six.viewitems(callbackLocQuery)
}
return callbackParams
# Call (simulated) external provider
getCallbackParams(getProviderResp())
# Try callback, with incorrect CSRF token
params = getCallbackParams(getProviderResp())
params['state'] = 'something_wrong'
resp = self.request('/oauth/%s/callback' % providerInfo['id'], params=params)
self.assertStatus(resp, 403)
self.assertTrue(
resp.json['message'].startswith('Invalid CSRF token'))
# Try callback, with expired CSRF token
params = getCallbackParams(getProviderResp())
token = Token().load(params['state'].partition('.')[0], force=True, objectId=False)
token['expires'] -= datetime.timedelta(days=1)
Token().save(token)
resp = self.request('/oauth/%s/callback' % providerInfo['id'], params=params)
self.assertStatus(resp, 403)
self.assertTrue(resp.json['message'].startswith('Expired CSRF token'))
# Try callback, with a valid CSRF token but no redirect
params = getCallbackParams(getProviderResp())
params['state'] = params['state'].partition('.')[0]
resp = self.request('/oauth/%s/callback' % providerInfo['id'], params=params)
self.assertStatus(resp, 400)
self.assertTrue(resp.json['message'].startswith('No redirect location'))
# Try callback, with incorrect code
params = getCallbackParams(getProviderResp())
params['code'] = 'something_wrong'
resp = self.request('/oauth/%s/callback' % providerInfo['id'], params=params)
self.assertStatus(resp, 502)
# Try callback, with real parameters from provider, but still for the
# 'new' account
params = getCallbackParams(getProviderResp())
resp = self.request('/oauth/%s/callback' % providerInfo['id'], params=params)
self.assertStatus(resp, 400)
self.assertTrue(
resp.json['message'].startswith('Registration on this instance is closed.'))
# This will need to be called several times, and will do a normal login
def doOauthLogin(accountType):
self.accountType = accountType
params = getCallbackParams(getProviderResp())
resp = self.request(
'/oauth/%s/callback' % providerInfo['id'], params=params, isJson=False)
self.assertStatus(resp, 303)
self.assertEqual(resp.headers['Location'], 'http://localhost/#foo/bar')
self.assertTrue('girderToken' in resp.cookie)
resp = self.request('/user/me', token=resp.cookie['girderToken'].value)
user = resp.json
self.assertStatusOk(resp)
self.assertEqual(
user['email'], providerInfo['accounts'][accountType]['user']['email'])
self.assertEqual(
user['login'], providerInfo['accounts'][accountType]['user']['login'])
self.assertEqual(
user['firstName'], providerInfo['accounts'][accountType]['user']['firstName'])
self.assertEqual(
user['lastName'], providerInfo['accounts'][accountType]['user']['lastName'])
return user
# Try callback for the 'existing' account, which should succeed
existing = doOauthLogin('existing')
# Hit validation exception on ignore registration policy setting
with self.assertRaises(ValidationException):
Setting().set(PluginSettings.IGNORE_REGISTRATION_POLICY, 'foo')
# Try callback for the 'new' account, with registration policy ignored
Setting().set(PluginSettings.IGNORE_REGISTRATION_POLICY, True)
new = doOauthLogin('new')
# Password login for 'new' OAuth-only user should fail gracefully
newUser = providerInfo['accounts']['new']['user']
resp = self.request('/user/authentication', basicAuth='%s:mypasswd' % newUser['login'])
self.assertStatus(resp, 400)
self.assertTrue(resp.json['message'].startswith('You don\'t have a password.'))
# Reset password for 'new' OAuth-only user should work
self.assertTrue(base.mockSmtp.isMailQueueEmpty())
resp = self.request(
'/user/password/temporary', method='PUT', params={
'email': providerInfo['accounts']['new']['user']['email']})
self.assertStatusOk(resp)
self.assertEqual(resp.json['message'], 'Sent temporary access email.')
self.assertTrue(base.mockSmtp.waitForMail())
msg = base.mockSmtp.getMail(parse=True)
# Pull out the auto-generated token from the email
body = msg.get_payload(decode=True).decode('utf8')
search = re.search('<a href="(.*)">', body)
link = search.group(1)
linkParts = link.split('/')
userId = linkParts[-3]
tokenId = linkParts[-1]
tempToken = Token().load(tokenId, force=True, objectId=False)
resp = self.request(
'/user/password/temporary/' + userId, method='GET', params={'token': tokenId})
self.assertStatusOk(resp)
self.assertEqual(resp.json['user']['login'], newUser['login'])
# We should now be able to change the password
resp = self.request(
'/user/password', method='PUT', user=resp.json['user'], params={
'old': tokenId,
'new': 'mypasswd'
})
self.assertStatusOk(resp)
# The temp token should get deleted on password change
token = Token().load(tempToken, force=True, objectId=False)
self.assertEqual(token, None)
# Password login for 'new' OAuth-only user should now succeed
resp = self.request('/user/authentication', basicAuth='%s:mypasswd' % newUser['login'])
self.assertStatusOk(resp)
return existing, new
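    # Catch-all mock: any HTTP request not intercepted by a provider-specific
    # mock below is unexpected and fails the test.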
@httmock.all_requests
def mockOtherRequest(self, url, request):
raise Exception('Unexpected url %s' % str(request.url))
def testGoogleOauth(self): # noqa
providerInfo = {
'id': 'google',
'name': 'Google',
'client_id': {
'key': PluginSettings.GOOGLE_CLIENT_ID,
'value': 'google_test_client_id'
},
'client_secret': {
'key': PluginSettings.GOOGLE_CLIENT_SECRET,
'value': 'google_test_client_secret'
},
'allowed_callback_re': r'^http://127\.0\.0\.1(?::\d+)?/api/v1/oauth/google/callback$',
'url_re': r'^https://accounts\.google\.com/o/oauth2/auth',
'accounts': {
'existing': {
'auth_code': 'google_existing_auth_code',
'access_token': 'google_existing_test_token',
'user': {
'login': self.adminUser['login'],
'email': self.adminUser['email'],
'firstName': self.adminUser['firstName'],
'lastName': self.adminUser['lastName'],
'oauth': {
'provider': 'google',
'id': '5326'
}
}
},
'new': {
'auth_code': 'google_new_auth_code',
'access_token': 'google_new_test_token',
'user': {
# this login is not provided by Google, but will be
# created internally by _deriveLogin
'login': 'creed',
'email': 'creed@la.ca.us',
'firstName': 'Apollo',
'lastName': 'Creed',
'oauth': {
'provider': 'google',
'id': 'the1best'
}
}
}
}
}
# Test inclusion of custom scope
Google.addScopes(['custom_scope', 'foo'])
@httmock.urlmatch(scheme='https', netloc='^accounts.google.com$',
path='^/o/oauth2/auth$', method='GET')
def mockGoogleRedirect(url, request):
try:
params = urllib.parse.parse_qs(url.query)
self.assertEqual(params['response_type'], ['code'])
self.assertEqual(params['access_type'], ['online'])
self.assertEqual(params['scope'], ['profile email custom_scope foo'])
except (KeyError, AssertionError) as e:
return {
'status_code': 400,
'content': json.dumps({
'error': repr(e)
})
}
try:
self.assertEqual(params['client_id'], [providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 401,
'content': json.dumps({
'error': repr(e)
})
}
try:
six.assertRegex(
self, params['redirect_uri'][0], providerInfo['allowed_callback_re'])
state = params['state'][0]
# Nothing to test for state, since provider doesn't care
except (KeyError, AssertionError) as e:
return {
'status_code': 400,
'content': json.dumps({
'error': repr(e)
})
}
returnQuery = urllib.parse.urlencode({
'state': state,
'code': providerInfo['accounts'][self.accountType]['auth_code']
})
return {
'status_code': 302,
'headers': {
'Location': '%s?%s' % (params['redirect_uri'][0], returnQuery)
}
}
@httmock.urlmatch(scheme='https', netloc='^accounts.google.com$',
path='^/o/oauth2/token$', method='POST')
def mockGoogleToken(url, request):
try:
params = urllib.parse.parse_qs(request.body)
self.assertEqual(params['client_id'], [providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 401,
'content': json.dumps({
'error': repr(e)
})
}
try:
self.assertEqual(params['grant_type'], ['authorization_code'])
self.assertEqual(params['client_secret'], [providerInfo['client_secret']['value']])
six.assertRegex(
self, params['redirect_uri'][0], providerInfo['allowed_callback_re'])
for account in six.viewvalues(providerInfo['accounts']):
if account['auth_code'] == params['code'][0]:
break
else:
self.fail()
except (KeyError, AssertionError) as e:
return {
'status_code': 400,
'content': json.dumps({
'error': repr(e)
})
}
return json.dumps({
'token_type': 'Bearer',
'access_token': account['access_token'],
'expires_in': 3546,
'id_token': 'google_id_token'
})
@httmock.urlmatch(scheme='https', netloc='^www.googleapis.com$',
path='^/plus/v1/people/me$', method='GET')
def mockGoogleApi(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'Bearer %s' % account['access_token'] == request.headers['Authorization']:
break
else:
self.fail()
params = urllib.parse.parse_qs(url.query)
self.assertSetEqual(set(params['fields'][0].split(',')), {'id', 'emails', 'name'})
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'error': repr(e)
})
}
return json.dumps({
'id': account['user']['oauth']['id'],
'name': {
'givenName': account['user']['firstName'],
'familyName': account['user']['lastName']
},
'emails': [
{
'type': 'other',
'value': 'styx@hades.gov'
}, {
'type': 'account',
'value': account['user']['email']
}
]
})
with httmock.HTTMock(
mockGoogleRedirect,
mockGoogleToken,
mockGoogleApi,
# Must keep 'mockOtherRequest' last
self.mockOtherRequest
):
self._testOauth(providerInfo)
def testGithubOauth(self): # noqa
providerInfo = {
'id': 'github',
'name': 'GitHub',
'client_id': {
'key': PluginSettings.GITHUB_CLIENT_ID,
'value': 'github_test_client_id'
},
'client_secret': {
'key': PluginSettings.GITHUB_CLIENT_SECRET,
'value': 'github_test_client_secret'
},
'allowed_callback_re':
r'^http://127\.0\.0\.1(?::\d+)?/api/v1/oauth/github/callback$',
'url_re': r'^https://github\.com/login/oauth/authorize',
'accounts': {
'existing': {
'auth_code': 'github_existing_auth_code',
'access_token': 'github_existing_test_token',
'user': {
'login': self.adminUser['login'],
'email': self.adminUser['email'],
'firstName': self.adminUser['firstName'],
'lastName': self.adminUser['lastName'],
'oauth': {
'provider': 'github',
'id': '2399'
}
}
},
'new': {
'auth_code': 'github_new_auth_code',
'access_token': 'github_new_test_token',
'user': {
# login may be provided externally by GitHub; for
# simplicity here, do not use a username with whitespace
# or underscores
'login': 'drago',
'email': 'metaphor@labs.ussr.gov',
'firstName': 'Ivan',
'lastName': 'Drago',
'oauth': {
'provider': 'github',
'id': 1985
}
}
}
}
}
@httmock.urlmatch(scheme='https', netloc='^github.com$',
path='^/login/oauth/authorize$', method='GET')
def mockGithubRedirect(url, request):
redirectUri = None
try:
params = urllib.parse.parse_qs(url.query)
# Check redirect_uri first, so other errors can still redirect
redirectUri = params['redirect_uri'][0]
self.assertEqual(params['client_id'], [providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 404,
'content': json.dumps({
'error': repr(e)
})
}
try:
six.assertRegex(self, redirectUri, providerInfo['allowed_callback_re'])
state = params['state'][0]
# Nothing to test for state, since provider doesn't care
self.assertEqual(params['scope'], ['user:email'])
except (KeyError, AssertionError) as e:
returnQuery = urllib.parse.urlencode({
'error': repr(e),
})
else:
returnQuery = urllib.parse.urlencode({
'state': state,
'code': providerInfo['accounts'][self.accountType]['auth_code']
})
return {
'status_code': 302,
'headers': {
'Location': '%s?%s' % (redirectUri, returnQuery)
}
}
@httmock.urlmatch(scheme='https', netloc='^github.com$',
path='^/login/oauth/access_token$', method='POST')
def mockGithubToken(url, request):
try:
self.assertEqual(request.headers['Accept'], 'application/json')
params = urllib.parse.parse_qs(request.body)
self.assertEqual(params['client_id'], [providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 404,
'content': json.dumps({
'error': repr(e)
})
}
try:
for account in six.viewvalues(providerInfo['accounts']):
if account['auth_code'] == params['code'][0]:
break
else:
self.fail()
self.assertEqual(params['client_secret'], [providerInfo['client_secret']['value']])
six.assertRegex(
self, params['redirect_uri'][0], providerInfo['allowed_callback_re'])
except (KeyError, AssertionError) as e:
returnBody = json.dumps({
'error': repr(e),
'error_description': repr(e)
})
else:
returnBody = json.dumps({
'token_type': 'bearer',
'access_token': account['access_token'],
'scope': 'user:email'
})
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json'
},
'content': returnBody
}
@httmock.urlmatch(scheme='https', netloc='^api.github.com$', path='^/user$', method='GET')
def mockGithubApiUser(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'token %s' % account['access_token'] == request.headers['Authorization']:
break
else:
self.fail()
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'message': repr(e)
})
}
return json.dumps({
'id': account['user']['oauth']['id'],
'login': account['user']['login'],
'name': '%s %s' % (account['user']['firstName'], account['user']['lastName'])
})
@httmock.urlmatch(scheme='https', netloc='^api.github.com$',
path='^/user/emails$', method='GET')
def mockGithubApiEmail(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'token %s' % account['access_token'] == request.headers['Authorization']:
break
else:
self.fail()
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'message': repr(e)
})
}
return json.dumps([
{
'primary': False,
'email': 'drago@siberia.ussr.gov',
'verified': True
}, {
'primary': True,
'email': account['user']['email'],
'verified': True
}
])
with httmock.HTTMock(
mockGithubRedirect,
mockGithubToken,
mockGithubApiUser,
mockGithubApiEmail,
# Must keep 'mockOtherRequest' last
self.mockOtherRequest
):
self._testOauth(providerInfo)
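        # Variant of the user endpoint mock that returns no display name, to
        # exercise the fallback of deriving first/last name from the GitHub login.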
@httmock.urlmatch(scheme='https', netloc='^api.github.com$', path='^/user$', method='GET')
def mockGithubUserWithoutName(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'token %s' % account['access_token'] == request.headers['Authorization']:
break
else:
self.fail()
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'message': repr(e)
})
}
return json.dumps({
'id': account['user']['oauth']['id'],
'login': account['user']['login'],
'name': None
})
self.setUp() # Call to reset everything so we can call _testOauth again
# If no name is provided, we expect to use the github login for both
providerInfo['accounts']['existing']['user']['lastName'] = 'rocky'
providerInfo['accounts']['existing']['user']['firstName'] = 'rocky'
providerInfo['accounts']['new']['user']['lastName'] = 'drago'
providerInfo['accounts']['new']['user']['firstName'] = 'drago'
with httmock.HTTMock(
mockGithubRedirect,
mockGithubToken,
mockGithubUserWithoutName,
mockGithubApiEmail,
# Must keep 'mockOtherRequest' last
self.mockOtherRequest
):
self._testOauth(providerInfo)
def testGlobusOauth(self): # noqa
providerInfo = {
'id': 'globus',
'name': 'Globus',
'client_id': {
'key': PluginSettings.GLOBUS_CLIENT_ID,
'value': 'globus_test_client_id'
},
'client_secret': {
'key': PluginSettings.GLOBUS_CLIENT_SECRET,
'value': 'globus_test_client_secret'
},
'scope': 'urn:globus:auth:scope:auth.globus.org:view_identities openid profile email',
'allowed_callback_re':
r'^http://127\.0\.0\.1(?::\d+)?/api/v1/oauth/globus/callback$',
'url_re': r'^https://auth.globus.org/v2/oauth2/authorize',
'accounts': {
'existing': {
'auth_code': 'globus_existing_auth_code',
'access_token': 'globus_existing_test_token',
                    'id_token': 'globus_existing_id_token',
'user': {
'login': self.adminUser['login'],
'email': self.adminUser['email'],
'firstName': self.adminUser['firstName'],
'lastName': self.adminUser['lastName'],
'oauth': {
'provider': 'globus',
'id': '2399'
}
}
},
'new': {
'auth_code': 'globus_new_auth_code',
'access_token': 'globus_new_test_token',
'id_token': 'globus_new_id_token',
'user': {
'login': 'metaphor',
'email': 'metaphor@labs.ussr.gov',
'firstName': 'Ivan',
'lastName': 'Drago',
'oauth': {
'provider': 'globus',
'id': 1985
}
}
}
}
}
@httmock.urlmatch(scheme='https', netloc='^auth.globus.org$',
path='^/v2/oauth2/authorize$', method='GET')
def mockGlobusRedirect(url, request):
try:
params = urllib.parse.parse_qs(url.query)
self.assertEqual(params['response_type'], ['code'])
self.assertEqual(params['access_type'], ['online'])
self.assertEqual(params['scope'], [providerInfo['scope']])
except (KeyError, AssertionError) as e:
return {
'status_code': 400,
'content': json.dumps({
'error': repr(e)
})
}
try:
self.assertEqual(params['client_id'], [providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 401,
'content': json.dumps({
'error': repr(e)
})
}
try:
six.assertRegex(
self, params['redirect_uri'][0], providerInfo['allowed_callback_re'])
state = params['state'][0]
# Nothing to test for state, since provider doesn't care
except (KeyError, AssertionError) as e:
return {
'status_code': 400,
'content': json.dumps({
'error': repr(e)
})
}
returnQuery = urllib.parse.urlencode({
'state': state,
'code': providerInfo['accounts'][self.accountType]['auth_code']
})
return {
'status_code': 302,
'headers': {
'Location': '%s?%s' % (params['redirect_uri'][0], returnQuery)
}
}
@httmock.urlmatch(scheme='https', netloc='^auth.globus.org$',
path='^/v2/oauth2/userinfo$', method='GET')
def mockGlobusUserInfo(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'Bearer %s' % account['access_token'] == \
request.headers['Authorization']:
break
else:
self.fail()
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'message': repr(e)
})
}
user = account['user']
return json.dumps({
'email': user['email'],
'preferred_username': user['email'],
'sub': user['oauth']['id'],
'name': '{firstName} {lastName}'.format(**user),
})
@httmock.urlmatch(scheme='https', netloc='^auth.globus.org$',
path='^/v2/oauth2/token$', method='POST')
def mockGlobusToken(url, request):
try:
self.assertEqual(request.headers['Accept'], 'application/json')
params = urllib.parse.parse_qs(request.body)
self.assertEqual(params['client_id'], [providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 404,
'content': json.dumps({
'error': repr(e)
})
}
try:
for account in six.viewvalues(providerInfo['accounts']):
if account['auth_code'] == params['code'][0]:
break
else:
self.fail()
self.assertEqual(params['client_secret'], [providerInfo['client_secret']['value']])
six.assertRegex(
self, params['redirect_uri'][0], providerInfo['allowed_callback_re'])
except (KeyError, AssertionError) as e:
returnBody = json.dumps({
'error': repr(e),
'error_description': repr(e)
})
else:
returnBody = json.dumps({
'access_token': account['access_token'],
'resource_server': 'auth.globus.org',
'expires_in': 3600,
'token_type': 'bearer',
'scope': 'urn:globus:auth:scope:auth.globus.org:monitor_ongoing',
'refresh_token': 'blah',
'id_token': account['id_token'],
'state': 'provided_by_client_to_prevent_replay_attacks',
'other_tokens': [],
})
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json'
},
'content': returnBody
}
with httmock.HTTMock(
mockGlobusRedirect,
mockGlobusUserInfo,
mockGlobusToken,
# Must keep 'mockOtherRequest' last
self.mockOtherRequest
):
self._testOauth(providerInfo)
self._testOauthTokenAsParam(providerInfo)
def testLinkedinOauth(self): # noqa
providerInfo = {
'id': 'linkedin',
'name': 'LinkedIn',
'client_id': {
'key': PluginSettings.LINKEDIN_CLIENT_ID,
'value': 'linkedin_test_client_id'
},
'client_secret': {
'key': PluginSettings.LINKEDIN_CLIENT_SECRET,
'value': 'linkedin_test_client_secret'
},
'allowed_callback_re':
r'^http://127\.0\.0\.1(?::\d+)?/api/v1/oauth/linkedin/callback$',
'url_re': r'^https://www\.linkedin\.com/uas/oauth2/authorization',
'accounts': {
'existing': {
'auth_code': 'linkedin_existing_auth_code',
'access_token': 'linkedin_existing_test_token',
'user': {
'login': self.adminUser['login'],
'email': self.adminUser['email'],
'firstName': self.adminUser['firstName'],
'lastName': self.adminUser['lastName'],
'oauth': {
'provider': 'linkedin',
'id': '42kD-5H'
}
}
},
'new': {
'auth_code': 'linkedin_new_auth_code',
'access_token': 'linkedin_new_test_token',
'user': {
# this login is not provided by LinkedIn, but will be
# created internally by _deriveLogin
'login': 'clubber',
'email': 'clubber@streets.chi.il.us',
'firstName': 'James',
'lastName': 'Lang',
'oauth': {
'provider': 'linkedin',
'id': '634pity-fool4'
}
}
}
}
}
@httmock.urlmatch(scheme='https', netloc='^www.linkedin.com$',
path='^/uas/oauth2/authorization$', method='GET')
def mockLinkedinRedirect(url, request):
try:
params = urllib.parse.parse_qs(url.query)
self.assertEqual(params['client_id'], [providerInfo['client_id']['value']])
six.assertRegex(
self, params['redirect_uri'][0], providerInfo['allowed_callback_re'])
except (KeyError, AssertionError) as e:
return {
'status_code': 200,
'content': json.dumps({
'error': repr(e)
})
}
try:
self.assertEqual(params['response_type'], ['code'])
self.assertEqual(
params['scope'][0].split(' '), ['r_basicprofile', 'r_emailaddress'])
state = params['state'][0]
# Nothing to test for state, since provider doesn't care
except (KeyError, AssertionError) as e:
returnQuery = urllib.parse.urlencode({
'error': repr(e),
'error_description': repr(e)
})
else:
returnQuery = urllib.parse.urlencode({
'state': state,
'code': providerInfo['accounts'][self.accountType]['auth_code']
})
return {
'status_code': 302,
'headers': {
'Location': '%s?%s' % (params['redirect_uri'][0], returnQuery)
}
}
@httmock.urlmatch(scheme='https', netloc='^www.linkedin.com$',
path='^/uas/oauth2/accessToken$', method='POST')
def mockLinkedinToken(url, request):
try:
self.assertEqual(
request.headers['Content-Type'], 'application/x-www-form-urlencoded')
params = urllib.parse.parse_qs(request.body)
self.assertEqual(params['grant_type'], ['authorization_code'])
self.assertEqual(params['client_id'], [providerInfo['client_id']['value']])
for account in six.viewvalues(providerInfo['accounts']):
if account['auth_code'] == params['code'][0]:
break
else:
self.fail()
six.assertRegex(
self, params['redirect_uri'][0], providerInfo['allowed_callback_re'])
except (KeyError, AssertionError) as e:
return {
'status_code': 400,
'content': json.dumps({
'error': repr(e),
'error_description': repr(e)
})
}
try:
self.assertEqual(params['client_secret'], [providerInfo['client_secret']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 401,
'content': json.dumps({
'error': repr(e),
'error_description': repr(e)
})
}
return json.dumps({
'access_token': account['access_token'],
'expires_in': datetime.timedelta(days=60).seconds
})
@httmock.urlmatch(scheme='https', netloc='^api.linkedin.com$',
path=r'^/v1/people/~(?::\(.+\)?)$', method='GET')
def mockLinkedinApi(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'Bearer %s' % account['access_token'] == request.headers['Authorization']:
break
else:
self.fail()
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'errorCode': 0,
'message': repr(e)
})
}
try:
fieldsRe = re.match(r'^.+:\((.+)\)$', url.path)
self.assertTrue(fieldsRe)
self.assertSetEqual(
set(fieldsRe.group(1).split(',')),
{'id', 'emailAddress', 'firstName', 'lastName'})
params = urllib.parse.parse_qs(url.query)
self.assertEqual(params['format'], ['json'])
except AssertionError as e:
return {
'status_code': 400,
'content': json.dumps({
'errorCode': 0,
'message': repr(e)
})
}
return json.dumps({
'id': account['user']['oauth']['id'],
'firstName': account['user']['firstName'],
'lastName': account['user']['lastName'],
'emailAddress': account['user']['email']
})
with httmock.HTTMock(
mockLinkedinRedirect,
mockLinkedinToken,
mockLinkedinApi,
# Must keep 'mockOtherRequest' last
self.mockOtherRequest
):
self._testOauth(providerInfo)
def testBitbucketOauth(self): # noqa
providerInfo = {
'id': 'bitbucket',
'name': 'Bitbucket',
'client_id': {
'key': PluginSettings.BITBUCKET_CLIENT_ID,
'value': 'bitbucket_test_client_id'
},
'client_secret': {
'key': PluginSettings.BITBUCKET_CLIENT_SECRET,
'value': 'bitbucket_test_client_secret'
},
'allowed_callback_re':
r'^http://127\.0\.0\.1(?::\d+)?'
r'/api/v1/oauth/bitbucket/callback$',
'url_re': r'^https://bitbucket\.org/site/oauth2/authorize',
'accounts': {
'existing': {
'auth_code': 'bitbucket_existing_auth_code',
'access_token': 'bitbucket_existing_test_token',
'user': {
'login': self.adminUser['login'],
'email': self.adminUser['email'],
'firstName': self.adminUser['firstName'],
'lastName': self.adminUser['lastName'],
'oauth': {
'provider': 'bitbucket',
'id': '2399'
}
}
},
'new': {
'auth_code': 'bitbucket_new_auth_code',
'access_token': 'bitbucket_new_test_token',
'user': {
# login may be provided externally by Bitbucket; for
# simplicity here, do not use a username with whitespace
# or underscores
'login': 'drago',
'email': 'metaphor@labs.ussr.gov',
'firstName': 'Ivan',
'lastName': 'Drago',
'oauth': {
'provider': 'bitbucket',
'id': 1983
}
}
}
}
}
@httmock.urlmatch(scheme='https', netloc='^bitbucket.org$',
path='^/site/oauth2/authorize$', method='GET')
def mockBitbucketRedirect(url, request):
redirectUri = None
try:
params = urllib.parse.parse_qs(url.query)
# Check redirect_uri first, so other errors can still redirect
redirectUri = params['redirect_uri'][0]
self.assertEqual(params['client_id'], [providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 404,
'content': json.dumps({
'error': repr(e)
})
}
try:
six.assertRegex(self, redirectUri, providerInfo['allowed_callback_re'])
state = params['state'][0]
# Nothing to test for state, since provider doesn't care
self.assertEqual(params['scope'], ['account'])
except (KeyError, AssertionError) as e:
returnQuery = urllib.parse.urlencode({
'error': repr(e),
'error_description': repr(e)
})
else:
returnQuery = urllib.parse.urlencode({
'state': state,
'code': providerInfo['accounts'][self.accountType]['auth_code']
})
return {
'status_code': 302,
'headers': {
'Location': '%s?%s' % (redirectUri, returnQuery)
}
}
@httmock.urlmatch(scheme='https', netloc='^bitbucket.org$',
path='^/site/oauth2/access_token$', method='POST')
def mockBitbucketToken(url, request):
try:
self.assertEqual(request.headers['Accept'], 'application/json')
params = urllib.parse.parse_qs(request.body)
self.assertEqual(params['grant_type'], ['authorization_code'])
except (KeyError, AssertionError) as e:
return {
'status_code': 400,
'content': json.dumps({
'error': repr(e),
'error_description': repr(e)
})
}
try:
for account in six.viewvalues(providerInfo['accounts']):
if account['auth_code'] == params['code'][0]:
break
else:
self.fail()
self.assertEqual(params['client_secret'], [providerInfo['client_secret']['value']])
six.assertRegex(
self, params['redirect_uri'][0], providerInfo['allowed_callback_re'])
except (KeyError, AssertionError) as e:
returnBody = json.dumps({
'error': repr(e),
'error_description': repr(e)
})
else:
returnBody = json.dumps({
'token_type': 'bearer',
'access_token': account['access_token'],
'scope': 'account'
})
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json'
},
'content': returnBody
}
@httmock.urlmatch(scheme='https', netloc='^api.bitbucket.org$',
path='^/2.0/user$', method='GET')
def mockBitbucketApiUser(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'Bearer %s' % account['access_token'] == request.headers['Authorization']:
break
else:
self.fail()
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'message': repr(e)
})
}
return json.dumps({
'created_on': '2011-12-20T16:34:07.132459+00:00',
'uuid': account['user']['oauth']['id'],
'location': 'Santa Monica, CA',
'links': {},
'website': 'https://tutorials.bitbucket.org/',
'username': account['user']['login'],
'display_name': '%s %s' % (
account['user']['firstName'], account['user']['lastName'])
})
@httmock.urlmatch(scheme='https', netloc='^api.bitbucket.org$',
path='^/2.0/user/emails$', method='GET')
def mockBitbucketApiEmail(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'Bearer %s' % account['access_token'] == request.headers['Authorization']:
break
else:
self.fail()
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'message': repr(e)
})
}
return json.dumps({
'page': 1,
'pagelen': 10,
'size': 1,
'values': [{
'is_primary': True,
'is_confirmed': True,
'email': account['user']['email'],
'links': {},
'type': 'email'
}]
})
with httmock.HTTMock(
mockBitbucketRedirect,
mockBitbucketToken,
mockBitbucketApiUser,
mockBitbucketApiEmail,
# Must keep 'mockOtherRequest' last
self.mockOtherRequest
):
self._testOauth(providerInfo)
def testBoxOauth(self): # noqa
providerInfo = {
'id': 'box',
'name': 'Box',
'client_id': {
'key': PluginSettings.BOX_CLIENT_ID,
'value': 'box_test_client_id'
},
'client_secret': {
'key': PluginSettings.BOX_CLIENT_SECRET,
'value': 'box_test_client_secret'
},
'allowed_callback_re':
r'^http://127\.0\.0\.1(?::\d+)?/api/v1/oauth/box/callback$',
'url_re': r'^https://account\.box\.com/api/oauth2/authorize',
'accounts': {
'existing': {
'auth_code': 'box_existing_auth_code',
'access_token': 'box_existing_test_token',
'user': {
'login': self.adminUser['login'],
'email': self.adminUser['email'],
'firstName': self.adminUser['firstName'],
'lastName': self.adminUser['lastName'],
'oauth': {
'provider': 'box',
'id': '2481632'
}
}
},
'new': {
'auth_code': 'box_new_auth_code',
'access_token': 'box_new_test_token',
'user': {
# this login is not provided by Box, but will be
# created internally by _deriveLogin
'login': 'metaphor',
'email': 'metaphor@labs.ussr.gov',
'firstName': 'Ivan',
'lastName': 'Drago',
'oauth': {
'provider': 'box',
'id': '1985'
}
}
}
}
}
@httmock.urlmatch(scheme='https', netloc='^account.box.com$',
path='^/api/oauth2/authorize$', method='GET')
def mockBoxRedirect(url, request):
redirectUri = None
try:
params = urllib.parse.parse_qs(url.query)
# Check redirect_uri first, so other errors can still redirect
redirectUri = params['redirect_uri'][0]
self.assertEqual(params['client_id'], [providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 404,
'content': json.dumps({
'error': repr(e)
})
}
try:
six.assertRegex(self, redirectUri, providerInfo['allowed_callback_re'])
state = params['state'][0]
# Nothing to test for state, since provider doesn't care
except (KeyError, AssertionError) as e:
returnQuery = urllib.parse.urlencode({
'error': repr(e),
})
else:
returnQuery = urllib.parse.urlencode({
'state': state,
'code': providerInfo['accounts'][self.accountType]['auth_code']
})
return {
'status_code': 302,
'headers': {
'Location': '%s?%s' % (redirectUri, returnQuery)
}
}
@httmock.urlmatch(scheme='https', netloc='^api.box.com$',
path='^/oauth2/token$', method='POST')
def mockBoxToken(url, request):
try:
self.assertEqual(request.headers['Accept'], 'application/json')
params = urllib.parse.parse_qs(request.body)
self.assertEqual(params['client_id'], [providerInfo['client_id']['value']])
except (KeyError, AssertionError) as e:
return {
'status_code': 404,
'content': json.dumps({
'error': repr(e)
})
}
try:
for account in six.viewvalues(providerInfo['accounts']):
if account['auth_code'] == params['code'][0]:
break
else:
self.fail()
self.assertEqual(params['client_secret'], [providerInfo['client_secret']['value']])
except (KeyError, AssertionError) as e:
returnBody = json.dumps({
'error': repr(e),
'error_description': repr(e)
})
else:
returnBody = json.dumps({
'token_type': 'bearer',
'access_token': account['access_token'],
'scope': 'user:email'
})
return {
'status_code': 200,
'headers': {
'Content-Type': 'application/json'
},
'content': returnBody
}
@httmock.urlmatch(scheme='https', netloc='^api.box.com$',
path='^/2.0/users/me$', method='GET')
def mockBoxApiUser(url, request):
try:
for account in six.viewvalues(providerInfo['accounts']):
if 'Bearer %s' % account['access_token'] == request.headers['Authorization']:
break
else:
self.fail()
except AssertionError as e:
return {
'status_code': 401,
'content': json.dumps({
'message': repr(e)
})
}
return json.dumps({
'id': account['user']['oauth']['id'],
'login': account['user']['email'],
'name': '%s %s' % (account['user']['firstName'], account['user']['lastName'])
})
with httmock.HTTMock(
mockBoxRedirect,
mockBoxToken,
mockBoxApiUser,
# Must keep 'mockOtherRequest' last
self.mockOtherRequest
):
self._testOauth(providerInfo)
|
{
"content_hash": "d96014cd3ad9a52592cc65a842b8c39e",
"timestamp": "",
"source": "github",
"line_count": 1473,
"max_line_length": 99,
"avg_line_length": 41.372708757637476,
"alnum_prop": 0.4655410062026189,
"repo_name": "kotfic/girder",
"id": "2942561151d5ac883cbb8f8e3f2e0b0f69a3be9f",
"size": "61736",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/oauth/plugin_tests/oauth_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "38260"
},
{
"name": "CSS",
"bytes": "54843"
},
{
"name": "Dockerfile",
"bytes": "2482"
},
{
"name": "HCL",
"bytes": "1424"
},
{
"name": "HTML",
"bytes": "139763"
},
{
"name": "JavaScript",
"bytes": "1129529"
},
{
"name": "Mako",
"bytes": "7873"
},
{
"name": "Python",
"bytes": "2117090"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Ruby",
"bytes": "9921"
},
{
"name": "Shell",
"bytes": "2177"
}
],
"symlink_target": ""
}
|
"""Tests for the engine utils module
"""
from builtins import range
import os
from copy import deepcopy
from tempfile import mkdtemp
from shutil import rmtree
from ....testing import (assert_equal, assert_true, assert_false)
from ... import engine as pe
from ....interfaces import base as nib
from ....interfaces import utility as niu
from .... import config
from ..utils import merge_dict, clean_working_directory, write_workflow_prov
def test_identitynode_removal():
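    # IdentityInterface nodes should be optimized away when the flat graph is
    # expanded, leaving only the two Select nodes per iterable value (4 x 2 = 8).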
def test_function(arg1, arg2, arg3):
import numpy as np
return (np.array(arg1) + arg2 + arg3).tolist()
wf = pe.Workflow(name="testidentity")
n1 = pe.Node(niu.IdentityInterface(fields=['a', 'b']), name='src')
n1.iterables = ('b', [0, 1, 2, 3])
n1.inputs.a = [0, 1, 2, 3]
n2 = pe.Node(niu.Select(), name='selector')
wf.connect(n1, ('a', test_function, 1, -1), n2, 'inlist')
wf.connect(n1, 'b', n2, 'index')
n3 = pe.Node(niu.IdentityInterface(fields=['c', 'd']), name='passer')
n3.inputs.c = [1, 2, 3, 4]
wf.connect(n2, 'out', n3, 'd')
n4 = pe.Node(niu.Select(), name='selector2')
wf.connect(n3, ('c', test_function, 1, -1), n4, 'inlist')
wf.connect(n3, 'd', n4, 'index')
fg = wf._create_flat_graph()
wf._set_needed_outputs(fg)
eg = pe.generate_expanded_graph(deepcopy(fg))
yield assert_equal, len(eg.nodes()), 8
def test_clean_working_directory():
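    # With remove_unnecessary_outputs enabled, only the needed outputs (plus
    # their paired .img/.HEAD companions and nipype's _0x*.json bookkeeping
    # file) should survive the cleanup.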
class OutputSpec(nib.TraitedSpec):
files = nib.traits.List(nib.File)
others = nib.File()
class InputSpec(nib.TraitedSpec):
infile = nib.File()
outputs = OutputSpec()
inputs = InputSpec()
wd = mkdtemp()
filenames = ['file.hdr', 'file.img', 'file.BRIK', 'file.HEAD',
'_0x1234.json', 'foo.txt']
outfiles = []
for filename in filenames:
outfile = os.path.join(wd, filename)
with open(outfile, 'wt') as fp:
fp.writelines('dummy')
outfiles.append(outfile)
outputs.files = outfiles[:4:2]
outputs.others = outfiles[5]
inputs.infile = outfiles[-1]
needed_outputs = ['files']
config.set_default_config()
yield assert_true, os.path.exists(outfiles[5])
config.set_default_config()
config.set('execution', 'remove_unnecessary_outputs', False)
out = clean_working_directory(outputs, wd, inputs, needed_outputs,
deepcopy(config._sections))
yield assert_true, os.path.exists(outfiles[5])
yield assert_equal, out.others, outfiles[5]
config.set('execution', 'remove_unnecessary_outputs', True)
out = clean_working_directory(outputs, wd, inputs, needed_outputs,
deepcopy(config._sections))
yield assert_true, os.path.exists(outfiles[1])
yield assert_true, os.path.exists(outfiles[3])
yield assert_true, os.path.exists(outfiles[4])
yield assert_false, os.path.exists(outfiles[5])
yield assert_equal, out.others, nib.Undefined
yield assert_equal, len(out.files), 2
config.set_default_config()
rmtree(wd)
def test_outputs_removal():
def test_function(arg1):
import os
file1 = os.path.join(os.getcwd(), 'file1.txt')
file2 = os.path.join(os.getcwd(), 'file2.txt')
fp = open(file1, 'wt')
fp.write('%d' % arg1)
fp.close()
fp = open(file2, 'wt')
fp.write('%d' % arg1)
fp.close()
return file1, file2
out_dir = mkdtemp()
n1 = pe.Node(niu.Function(input_names=['arg1'],
output_names=['file1', 'file2'],
function=test_function),
base_dir=out_dir,
name='testoutputs')
n1.inputs.arg1 = 1
n1.config = {'execution': {'remove_unnecessary_outputs': True}}
n1.config = merge_dict(deepcopy(config._sections), n1.config)
n1.run()
yield assert_true, os.path.exists(os.path.join(out_dir,
n1.name,
'file1.txt'))
yield assert_true, os.path.exists(os.path.join(out_dir,
n1.name,
'file2.txt'))
n1.needed_outputs = ['file2']
n1.run()
yield assert_false, os.path.exists(os.path.join(out_dir,
n1.name,
'file1.txt'))
yield assert_true, os.path.exists(os.path.join(out_dir,
n1.name,
'file2.txt'))
rmtree(out_dir)
class InputSpec(nib.TraitedSpec):
in_file = nib.File(exists=True, copyfile=True)
class OutputSpec(nib.TraitedSpec):
output1 = nib.traits.List(nib.traits.Int, desc='outputs')
class TestInterface(nib.BaseInterface):
input_spec = InputSpec
output_spec = OutputSpec
def _run_interface(self, runtime):
runtime.returncode = 0
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['output1'] = [1]
return outputs
def test_inputs_removal():
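    # The copied input file should remain in the node's working directory only
    # when keep_inputs is enabled.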
out_dir = mkdtemp()
file1 = os.path.join(out_dir, 'file1.txt')
fp = open(file1, 'wt')
fp.write('dummy_file')
fp.close()
n1 = pe.Node(TestInterface(),
base_dir=out_dir,
name='testinputs')
n1.inputs.in_file = file1
n1.config = {'execution': {'keep_inputs': True}}
n1.config = merge_dict(deepcopy(config._sections), n1.config)
n1.run()
yield assert_true, os.path.exists(os.path.join(out_dir,
n1.name,
'file1.txt'))
n1.inputs.in_file = file1
n1.config = {'execution': {'keep_inputs': False}}
n1.config = merge_dict(deepcopy(config._sections), n1.config)
n1.overwrite = True
n1.run()
yield assert_false, os.path.exists(os.path.join(out_dir,
n1.name,
'file1.txt'))
rmtree(out_dir)
def test_outputs_removal_wf():
def test_function(arg1):
import os
file1 = os.path.join(os.getcwd(), 'file1.txt')
file2 = os.path.join(os.getcwd(), 'file2.txt')
file3 = os.path.join(os.getcwd(), 'file3.txt')
file4 = os.path.join(os.getcwd(), 'subdir', 'file1.txt')
files = [file1, file2, file3, file4]
os.mkdir("subdir")
for filename in files:
with open(filename, 'wt') as fp:
fp.write('%d' % arg1)
return file1, file2, os.path.join(os.getcwd(), "subdir")
def test_function2(in_file, arg):
import os
in_arg = open(in_file).read()
file1 = os.path.join(os.getcwd(), 'file1.txt')
file2 = os.path.join(os.getcwd(), 'file2.txt')
file3 = os.path.join(os.getcwd(), 'file3.txt')
files = [file1, file2, file3]
for filename in files:
with open(filename, 'wt') as fp:
fp.write('%d' % arg + in_arg)
return file1, file2, 1
def test_function3(arg):
import os
return arg
out_dir = mkdtemp()
for plugin in ('Linear',): # , 'MultiProc'):
n1 = pe.Node(niu.Function(input_names=['arg1'],
output_names=['out_file1', 'out_file2', 'dir'],
function=test_function),
name='n1')
n1.inputs.arg1 = 1
n2 = pe.Node(niu.Function(input_names=['in_file', 'arg'],
output_names=['out_file1', 'out_file2', 'n'],
function=test_function2),
name='n2')
n2.inputs.arg = 2
n3 = pe.Node(niu.Function(input_names=['arg'],
output_names=['n'],
function=test_function3),
name='n3')
wf = pe.Workflow(name="node_rem_test" + plugin, base_dir=out_dir)
wf.connect(n1, "out_file1", n2, "in_file")
wf.run(plugin='Linear')
for remove_unnecessary_outputs in [True, False]:
config.set_default_config()
wf.config = {'execution': {'remove_unnecessary_outputs': remove_unnecessary_outputs}}
rmtree(os.path.join(wf.base_dir, wf.name))
wf.run(plugin=plugin)
yield assert_true, os.path.exists(os.path.join(wf.base_dir,
wf.name,
n1.name,
'file2.txt')) != remove_unnecessary_outputs
yield assert_true, os.path.exists(os.path.join(wf.base_dir,
wf.name,
n1.name,
"subdir",
'file1.txt')) != remove_unnecessary_outputs
yield assert_true, os.path.exists(os.path.join(wf.base_dir,
wf.name,
n1.name,
'file1.txt'))
yield assert_true, os.path.exists(os.path.join(wf.base_dir,
wf.name,
n1.name,
'file3.txt')) != remove_unnecessary_outputs
yield assert_true, os.path.exists(os.path.join(wf.base_dir,
wf.name,
n2.name,
'file1.txt'))
yield assert_true, os.path.exists(os.path.join(wf.base_dir,
wf.name,
n2.name,
'file2.txt'))
yield assert_true, os.path.exists(os.path.join(wf.base_dir,
wf.name,
n2.name,
'file3.txt')) != remove_unnecessary_outputs
n4 = pe.Node(TestInterface(), name='n4')
wf.connect(n2, "out_file1", n4, "in_file")
def pick_first(l):
return l[0]
wf.connect(n4, ("output1", pick_first), n3, "arg")
for remove_unnecessary_outputs in [True, False]:
for keep_inputs in [True, False]:
config.set_default_config()
wf.config = {'execution': {'keep_inputs': keep_inputs, 'remove_unnecessary_outputs': remove_unnecessary_outputs}}
rmtree(os.path.join(wf.base_dir, wf.name))
wf.run(plugin=plugin)
yield assert_true, os.path.exists(os.path.join(wf.base_dir,
wf.name,
n2.name,
'file1.txt'))
yield assert_true, os.path.exists(os.path.join(wf.base_dir,
wf.name,
n2.name,
'file2.txt')) != remove_unnecessary_outputs
yield assert_true, os.path.exists(os.path.join(wf.base_dir,
wf.name,
n4.name,
'file1.txt')) == keep_inputs
rmtree(out_dir)
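# Identity pass-through used as the Function body for the iterable nodes below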
def fwhm(fwhm):
return fwhm
def create_wf(name):
pipe = pe.Workflow(name=name)
process = pe.Node(niu.Function(input_names=['fwhm'],
output_names=['fwhm'],
function=fwhm),
name='proc')
process.iterables = ('fwhm', [0])
process2 = pe.Node(niu.Function(input_names=['fwhm'],
output_names=['fwhm'],
function=fwhm),
name='proc2')
process2.iterables = ('fwhm', [0])
pipe.connect(process, 'fwhm', process2, 'fwhm')
return pipe
def test_multi_disconnected_iterable():
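    # 30 disconnected sub-workflows, each with two single-value iterable nodes,
    # should expand to 60 nodes.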
out_dir = mkdtemp()
metawf = pe.Workflow(name='meta')
metawf.base_dir = out_dir
metawf.add_nodes([create_wf('wf%d' % i) for i in range(30)])
eg = metawf.run(plugin='Linear')
yield assert_equal, len(eg.nodes()), 60
rmtree(out_dir)
def test_provenance():
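    # A single sub-workflow run should produce a provenance graph with 2
    # bundles and 7 records.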
out_dir = mkdtemp()
metawf = pe.Workflow(name='meta')
metawf.base_dir = out_dir
metawf.add_nodes([create_wf('wf%d' % i) for i in range(1)])
eg = metawf.run(plugin='Linear')
prov_base = os.path.join(out_dir,
'workflow_provenance_test')
psg = write_workflow_prov(eg, prov_base, format='all')
yield assert_equal, len(psg.bundles), 2
yield assert_equal, len(psg.get_records()), 7
rmtree(out_dir)
|
{
"content_hash": "e171c7845e05211eee097d4d93bf751c",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 129,
"avg_line_length": 39.816326530612244,
"alnum_prop": 0.47719118400820093,
"repo_name": "FCP-INDI/nipype",
"id": "8420f587c2e7b42184de2a776df07d8802fc1350",
"size": "13771",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "nipype/pipeline/engine/tests/test_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2063"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "5280923"
},
{
"name": "Shell",
"bytes": "1958"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
import os
from app import create_app, db
from app.models import User, Follow, Role, Permission, Post, Comment, Category
from flask_script import Manager, Shell
from flask_migrate import Migrate, MigrateCommand
app = create_app(os.getenv('FLASK_CONFIG') or 'default')
manager = Manager(app)
migrate = Migrate(app, db)
def make_shell_context():
return dict(app=app, db=db, User=User, Follow=Follow, Role=Role,
Permission=Permission, Post=Post, Comment=Comment, Category=Category)
manager.add_command("shell", Shell(make_context=make_shell_context))
manager.add_command('db', MigrateCommand)
@manager.command
def test():
"""Run the unit tests."""
import unittest
tests = unittest.TestLoader().discover('tests')
unittest.TextTestRunner(verbosity=2).run(tests)
@manager.command
def profile(length=25, profile_dir=None):
"""Start the application under the code profiler."""
from werkzeug.contrib.profiler import ProfilerMiddleware
app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length],
profile_dir=profile_dir)
app.run()
@manager.command
def deploy():
"""Run deployment tasks."""
from flask_migrate import upgrade
from app.models import Role, User, Category
# migrate database to latest revision
upgrade()
# create user roles
Role.insert_roles()
# create self-follows for all users
User.add_self_follows()
if __name__ == '__main__':
manager.run()
|
{
"content_hash": "a0b26a8c99789fb7e10b47741ef6d099",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 85,
"avg_line_length": 31.80851063829787,
"alnum_prop": 0.6963210702341137,
"repo_name": "ACLeiChen/personalBlog",
"id": "54a6fb54afb7551397607887d4b4649eafd895e4",
"size": "1517",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manage.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "349240"
},
{
"name": "HTML",
"bytes": "1041849"
},
{
"name": "JavaScript",
"bytes": "430406"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "PowerShell",
"bytes": "471"
},
{
"name": "Python",
"bytes": "59536"
},
{
"name": "Ruby",
"bytes": "2373"
}
],
"symlink_target": ""
}
|
import unittest
import scores_messages
class ScoresMessagesTest(unittest.TestCase):
def testSanityMessages(self):
"""Just verify there are no syntax errors in the protocol definitions."""
scores_messages.GamesRequest()
scores_messages.GamesResponse()
scores_messages.GameInfoRequest()
scores_messages.GameInfoResponse()
|
{
"content_hash": "cbe4e8c9d24771835fcd550f363da150",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 77,
"avg_line_length": 28.75,
"alnum_prop": 0.7710144927536232,
"repo_name": "martincochran/score-minion",
"id": "ea6aa32e114eadf4fe8c5f226769c686d676eab5",
"size": "948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scores_messages_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "154"
},
{
"name": "GAP",
"bytes": "7024"
},
{
"name": "HTML",
"bytes": "714560"
},
{
"name": "JavaScript",
"bytes": "18466"
},
{
"name": "Protocol Buffer",
"bytes": "3182"
},
{
"name": "Python",
"bytes": "1516969"
}
],
"symlink_target": ""
}
|
from holoviews.element import RGB, Tiles, Points, Bounds
from holoviews.element.tiles import StamenTerrain, _ATTRIBUTIONS
from .test_plot import TestPlotlyPlot, plotly_renderer
import numpy as np
class TestMapboxTilesPlot(TestPlotlyPlot):
def setUp(self):
super().setUp()
# Precompute coordinates
self.xs = [3000000, 2000000, 1000000]
self.ys = [-3000000, -2000000, -1000000]
self.x_range = (-5000000, 4000000)
self.x_center = sum(self.x_range) / 2.0
self.y_range = (-3000000, 2000000)
self.y_center = sum(self.y_range) / 2.0
self.lon_range, self.lat_range = Tiles.easting_northing_to_lon_lat(self.x_range, self.y_range)
self.lon_centers, self.lat_centers = Tiles.easting_northing_to_lon_lat(
[self.x_center], [self.y_center]
)
self.lon_center, self.lat_center = self.lon_centers[0], self.lat_centers[0]
self.lons, self.lats = Tiles.easting_northing_to_lon_lat(self.xs, self.ys)
def test_mapbox_tiles_defaults(self):
tiles = Tiles("").redim.range(
x=self.x_range, y=self.y_range
)
fig_dict = plotly_renderer.get_plot_state(tiles)
# Check dummy trace
self.assertEqual(len(fig_dict["data"]), 1)
dummy_trace = fig_dict["data"][0]
self.assertEqual(dummy_trace["type"], "scattermapbox")
self.assertEqual(dummy_trace["lon"], [])
self.assertEqual(dummy_trace["lat"], [])
self.assertEqual(dummy_trace["showlegend"], False)
# Check mapbox subplot
subplot = fig_dict["layout"]["mapbox"]
self.assertEqual(subplot["style"], "white-bg")
self.assertEqual(
subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
)
# Check that xaxis and yaxis entries are not created
self.assertNotIn("xaxis", fig_dict["layout"])
self.assertNotIn("yaxis", fig_dict["layout"])
# Check no layers are introduced when an empty tile server string is
# passed
layers = fig_dict["layout"]["mapbox"].get("layers", [])
self.assertEqual(len(layers), 0)
def test_styled_mapbox_tiles(self):
tiles = Tiles().opts(mapboxstyle="dark", accesstoken="token-str").redim.range(
x=self.x_range, y=self.y_range
)
fig_dict = plotly_renderer.get_plot_state(tiles)
# Check mapbox subplot
subplot = fig_dict["layout"]["mapbox"]
self.assertEqual(subplot["style"], "dark")
self.assertEqual(subplot["accesstoken"], "token-str")
self.assertEqual(
subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
)
def test_raster_layer(self):
tiles = StamenTerrain().redim.range(
x=self.x_range, y=self.y_range
).opts(alpha=0.7, min_zoom=3, max_zoom=7)
fig_dict = plotly_renderer.get_plot_state(tiles)
# Check dummy trace
self.assertEqual(len(fig_dict["data"]), 1)
dummy_trace = fig_dict["data"][0]
self.assertEqual(dummy_trace["type"], "scattermapbox")
self.assertEqual(dummy_trace["lon"], [])
self.assertEqual(dummy_trace["lat"], [])
self.assertEqual(dummy_trace["showlegend"], False)
# Check mapbox subplot
subplot = fig_dict["layout"]["mapbox"]
self.assertEqual(subplot["style"], "white-bg")
self.assertEqual(
subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
)
# Check for raster layer
layers = fig_dict["layout"]["mapbox"].get("layers", [])
self.assertEqual(len(layers), 1)
layer = layers[0]
self.assertEqual(layer["source"][0].lower(), tiles.data.lower())
self.assertEqual(layer["opacity"], 0.7)
self.assertEqual(layer["sourcetype"], "raster")
self.assertEqual(layer["minzoom"], 3)
self.assertEqual(layer["maxzoom"], 7)
self.assertEqual(layer["sourceattribution"], _ATTRIBUTIONS[('stamen', 'net/t')])
def test_overlay(self):
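        # Build a five-element overlay (vector tile base, raster tiles, RGB
        # image, Points, Bounds) and verify every resulting trace and mapbox layer.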
# Base layer is mapbox vector layer
tiles = Tiles("").opts(mapboxstyle="dark", accesstoken="token-str")
# Raster tile layer
stamen_raster = StamenTerrain().opts(alpha=0.7)
# RGB layer
rgb_data = np.random.rand(10, 10, 3)
rgb = RGB(
rgb_data,
bounds=(self.x_range[0], self.y_range[0], self.x_range[1], self.y_range[1])
).opts(
opacity=0.5
)
# Points layer
points = Points([(0, 0), (self.x_range[1], self.y_range[1])]).opts(
show_legend=True
)
# Bounds
bounds = Bounds((self.x_range[0], self.y_range[0], 0, 0))
# Overlay
overlay = (tiles * stamen_raster * rgb * points * bounds).redim.range(
x=self.x_range, y=self.y_range
)
# Render to plotly figure dictionary
fig_dict = plotly_renderer.get_plot_state(overlay)
# Check number of traces and layers
traces = fig_dict["data"]
subplot = fig_dict["layout"]["mapbox"]
layers = subplot["layers"]
self.assertEqual(len(traces), 5)
self.assertEqual(len(layers), 2)
# Check vector layer
dummy_trace = traces[0]
self.assertEqual(dummy_trace["type"], "scattermapbox")
self.assertEqual(dummy_trace["lon"], [])
self.assertEqual(dummy_trace["lat"], [])
self.assertFalse(dummy_trace["showlegend"])
self.assertEqual(subplot["style"], "dark")
self.assertEqual(subplot["accesstoken"], "token-str")
self.assertEqual(
subplot['center'], {'lat': self.lat_center, 'lon': self.lon_center}
)
# Check raster layer
dummy_trace = traces[1]
raster_layer = layers[0]
self.assertEqual(dummy_trace["type"], "scattermapbox")
self.assertEqual(dummy_trace["lon"], [])
self.assertEqual(dummy_trace["lat"], [])
self.assertFalse(dummy_trace["showlegend"])
# Check raster_layer
self.assertEqual(raster_layer["below"], "traces")
self.assertEqual(raster_layer["opacity"], 0.7)
self.assertEqual(raster_layer["sourcetype"], "raster")
self.assertEqual(raster_layer["source"][0].lower(), stamen_raster.data.lower())
# Check RGB layer
dummy_trace = traces[2]
rgb_layer = layers[1]
self.assertEqual(dummy_trace["type"], "scattermapbox")
self.assertEqual(dummy_trace["lon"], [None])
self.assertEqual(dummy_trace["lat"], [None])
self.assertFalse(dummy_trace["showlegend"])
# Check rgb_layer
self.assertEqual(rgb_layer["below"], "traces")
self.assertEqual(rgb_layer["opacity"], 0.5)
self.assertEqual(rgb_layer["sourcetype"], "image")
self.assertTrue(rgb_layer["source"].startswith("data:image/png;base64,iVBOR"))
self.assertEqual(rgb_layer["coordinates"], [
[self.lon_range[0], self.lat_range[1]],
[self.lon_range[1], self.lat_range[1]],
[self.lon_range[1], self.lat_range[0]],
[self.lon_range[0], self.lat_range[0]]
])
# Check Points layer
points_trace = traces[3]
self.assertEqual(points_trace["type"], "scattermapbox")
self.assertEqual(points_trace["lon"], np.array([0, self.lon_range[1]]))
self.assertEqual(points_trace["lat"], np.array([0, self.lat_range[1]]))
self.assertEqual(points_trace["mode"], "markers")
self.assertTrue(points_trace.get("showlegend", True))
# Check Bounds layer
bounds_trace = traces[4]
self.assertEqual(bounds_trace["type"], "scattermapbox")
self.assertEqual(bounds_trace["lon"], np.array([
self.lon_range[0], self.lon_range[0], 0, 0, self.lon_range[0]
]))
self.assertEqual(bounds_trace["lat"], np.array([
self.lat_range[0], 0, 0, self.lat_range[0], self.lat_range[0]
]))
self.assertEqual(bounds_trace["mode"], "lines")
        self.assertTrue(points_trace["showlegend"])
# No xaxis/yaxis
self.assertNotIn("xaxis", fig_dict["layout"])
self.assertNotIn("yaxis", fig_dict["layout"])
|
{
"content_hash": "311e984c898991d7ba62becb339a8793",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 102,
"avg_line_length": 38.91079812206573,
"alnum_prop": 0.5936293436293436,
"repo_name": "ioam/holoviews",
"id": "bcd294cd0c33de4c2f3a33db8c136a7a3f76a532",
"size": "8288",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "holoviews/tests/plotting/plotly/test_tiles.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1546"
},
{
"name": "HTML",
"bytes": "18997"
},
{
"name": "JavaScript",
"bytes": "20747"
},
{
"name": "Jupyter Notebook",
"bytes": "1379"
},
{
"name": "Python",
"bytes": "3241652"
}
],
"symlink_target": ""
}
|
from pprint import pprint
import articlequality
from revscoring import Model
scorer_model = Model.load(open('../revscoring_models/enwiki.nettrom_wp10.gradient_boosting.model', 'rb'))
# Classifies a revision of an article based on wikitext alone
text = "An '''anachronism''' {{cite }}(from the [[Ancient Greek|Greek]] <ref ..."
prediction_results = articlequality.score(scorer_model, text)
# Print predicted assessment class and probabilities for all classes.
pprint(("assessment", prediction_results['prediction']))
pprint(("probs", prediction_results['probability']))
|
{
"content_hash": "5c83eb305612822163387a8373f42adb",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 105,
"avg_line_length": 40.92857142857143,
"alnum_prop": 0.7609075043630017,
"repo_name": "halfak/wikiclass",
"id": "fb9088cbb9cc31ff9ea46fb8e192b34f3cd772d4",
"size": "573",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/classify_text.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "6739"
},
{
"name": "Python",
"bytes": "53959"
}
],
"symlink_target": ""
}
|
"""
Classes for extracting sensor messages from Apollo Cyber record channels.
"""
import os
import struct
import sys
import cv2
import numpy as np
from pypcd import pypcd
from modules.tools.sensor_calibration.data_file_object import TimestampFileObject, OdometryFileObject
from modules.drivers.proto import conti_radar_pb2
from modules.drivers.proto import sensor_image_pb2
from modules.drivers.proto import pointcloud_pb2
from modules.localization.proto import gps_pb2
from modules.localization.proto import localization_pb2
class SensorMessageParser(object):
"""Wrapper for cyber channel message extractor"""
# Initializing extractor
def __init__(self, output_path, instance_saving=True):
"""
instance_saving:
        True for large channel messages, e.g., camera/lidar/radar;
        False for small channel messages, e.g., GNSS topics
"""
self._msg_parser = None
self._timestamps = []
self._proto_parser = None
self._init_parser()
self._parsed_data = None
self._output_path = output_path
self._timestamp_file = os.path.join(self._output_path, "timestamps")
self._instance_saving = instance_saving
# initializing msg and proto parser
def _init_parser(self):
raise NotImplementedError
def parse_sensor_message(self, msg):
raise NotImplementedError
def save_messages_to_file(self):
return True
def get_msg_count(self):
return len(self._timestamps)
def get_timestamps(self):
return self._timestamps
def save_timestamps_to_file(self):
timestamp_file_obj = TimestampFileObject(self._timestamp_file,
operation='write',
file_type='txt')
timestamp_file_obj.save_to_file(self._timestamps)
return True
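# Illustrative driver loop (hypothetical record/channel objects assumed); a
# sketch of how the parser subclasses below are meant to be used:
#
#   parser = PointCloudParser(output_path='/tmp/lidar', instance_saving=True)
#   for msg in channel_messages:          # messages read from a cyber record
#       parser.parse_sensor_message(msg)  # writes one .pcd file per message
#   parser.save_timestamps_to_file()      # writes the "timestamps" text file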
class GpsParser(SensorMessageParser):
"""
class to parse GNSS odometry channel.
saving this small topic as a whole.
"""
def __init__(self, output_path, instance_saving=False):
super(GpsParser, self).__init__(output_path, instance_saving)
if not self._instance_saving:
self._parsed_data = []
self._odomotry_output_file = os.path.join(self._output_path, "odometry")
def _init_parser(self):
self._msg_parser = gps_pb2.Gps()
def parse_sensor_message(self, msg):
""" parse Gps information from GNSS odometry channel"""
gps = self._msg_parser
gps.ParseFromString(msg.message)
# all double, except point_type is int32
ts = gps.header.timestamp_sec
self._timestamps.append(ts)
point_type = 56
qw = gps.localization.orientation.qw
qx = gps.localization.orientation.qx
qy = gps.localization.orientation.qy
qz = gps.localization.orientation.qz
x = gps.localization.position.x
y = gps.localization.position.y
z = gps.localization.position.z
        # save 9 values as a tuple, for easier struct packing during storage
if self._instance_saving:
raise ValueError("Gps odometry should be saved in a file")
else:
self._parsed_data.append((ts, point_type, qw, qx, qy, qz, x, y, z))
return True
def save_messages_to_file(self):
"""save list of parsed Odometry messages to file"""
odometry_file_obj = OdometryFileObject(file_path=self._odomotry_output_file,
operation='write',
file_type='binary')
odometry_file_obj.save_to_file(self._parsed_data)
return True
class PoseParser(GpsParser):
"""
inherit similar data saver and data structure from GpsParser
save the ego-localization information same as odometry
"""
def _init_parser(self):
self._msg_parser = localization_pb2.LocalizationEstimate()
def parse_sensor_message(self, msg):
""" parse localization information from localization estimate channel"""
loc_est = self._msg_parser
loc_est.ParseFromString(msg.message)
# all double, except point_type is int32
ts = loc_est.header.timestamp_sec
self._timestamps.append(ts)
point_type = 56
qw = loc_est.pose.orientation.qw
qx = loc_est.pose.orientation.qx
qy = loc_est.pose.orientation.qy
qz = loc_est.pose.orientation.qz
x = loc_est.pose.position.x
y = loc_est.pose.position.y
z = loc_est.pose.position.z
        # save 9 values as a tuple, for easier struct packing during storage
if self._instance_saving:
raise ValueError("localization--pseudo odometry-- should be saved in a file")
else:
self._parsed_data.append((ts, point_type, qw, qx, qy, qz, x, y, z))
return True
class PointCloudParser(SensorMessageParser):
"""
class to parse apollo/$(lidar)/PointCloud2 channels.
    saving each parsed msg separately
"""
def __init__(self, output_path, instance_saving=True, suffix='.pcd'):
super(PointCloudParser, self).__init__(output_path, instance_saving)
self._suffix = suffix
def convert_xyzit_pb_to_array(self, xyz_i_t, data_type):
arr = np.zeros(len(xyz_i_t), dtype=data_type)
for i, point in enumerate(xyz_i_t):
# change timestamp to timestamp_sec
arr[i] = (point.x, point.y, point.z,
point.intensity, point.timestamp/1e9)
return arr
def make_xyzit_point_cloud(self, xyz_i_t):
"""
Make a pointcloud object from PointXYZIT message, as Pointcloud.proto.
message PointXYZIT {
optional float x = 1 [default = nan];
optional float y = 2 [default = nan];
optional float z = 3 [default = nan];
optional uint32 intensity = 4 [default = 0];
optional uint64 timestamp = 5 [default = 0];
}
"""
md = {'version': .7,
'fields': ['x', 'y', 'z', 'intensity', 'timestamp'],
'count': [1, 1, 1, 1, 1],
'width': len(xyz_i_t),
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': len(xyz_i_t),
'type': ['F', 'F', 'F', 'U', 'F'],
'size': [4, 4, 4, 4, 8],
'data': 'binary_compressed'}
typenames = []
for t, s in zip(md['type'], md['size']):
np_type = pypcd.pcd_type_to_numpy_type[(t, s)]
typenames.append(np_type)
np_dtype = np.dtype(list(zip(md['fields'], typenames)))
pc_data = self.convert_xyzit_pb_to_array(xyz_i_t, data_type=np_dtype)
pc = pypcd.PointCloud(md, pc_data)
return pc
def save_pointcloud_meta_to_file(self, pc_meta, pcd_file):
pypcd.save_point_cloud_bin_compressed(pc_meta, pcd_file)
def _init_parser(self):
self._msg_parser = pointcloud_pb2.PointCloud()
def parse_sensor_message(self, msg):
"""
Transform protobuf PointXYZIT to standard PCL bin_compressed_file(*.pcd).
"""
pointcloud = self._msg_parser
pointcloud.ParseFromString(msg.message)
self._timestamps.append(pointcloud.measurement_time)
# self._timestamps.append(pointcloud.header.timestamp_sec)
self._parsed_data = self.make_xyzit_point_cloud(pointcloud.point)
if self._instance_saving:
file_name = "%05d" % self.get_msg_count() + self._suffix
output_file = os.path.join(self._output_path, file_name)
self.save_pointcloud_meta_to_file(pc_meta=self._parsed_data, pcd_file=output_file)
else:
raise ValueError("not implement multiple message concatenation for PointCloud2 topic")
# TODO(gchen-Apollo): add saint check
return True
class ImageParser(SensorMessageParser):
"""
class to parse apollo/$(camera)/image channels.
    saving each parsed msg separately
"""
def __init__(self, output_path, instance_saving=True, suffix='.jpg'):
super(ImageParser, self).__init__(output_path, instance_saving)
self._suffix = suffix
def _init_parser(self):
self._msg_parser = sensor_image_pb2.Image()
def parse_sensor_message(self, msg):
image = self._msg_parser
image.ParseFromString(msg.message)
self._timestamps.append(image.header.timestamp_sec)
# Save image according to cyber format, defined in sensor camera proto.
# height = 4, image height, that is, number of rows.
# width = 5, image width, that is, number of columns.
# encoding = 6, as string, type is 'rgb8', 'bgr8' or 'gray'.
# step = 7, full row length in bytes.
# data = 8, actual matrix data in bytes, size is (step * rows).
        # type = CV_8UC1 if image step is equal to width (gray), CV_8UC3
        #        if step is equal to width * 3 (color).
if image.encoding == 'rgb8' or image.encoding == 'bgr8':
if image.step != image.width * 3:
                print('Image.step %d does not equal Image.width %d * 3 for a color image.'
% (image.step, image.width))
return False
elif image.encoding == 'gray' or image.encoding == 'y':
if image.step != image.width:
                print('Image.step %d does not equal Image.width %d for a gray image.'
% (image.step, image.width))
return False
else:
print('Unsupported image encoding type: %s.' % image.encoding)
return False
channel_num = image.step // image.width
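        # e.g. a 'bgr8' image of width 1920 has step 5760, giving 3 channels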
        self._parsed_data = np.frombuffer(image.data, dtype=np.uint8).reshape(
(image.height, image.width, channel_num))
if self._instance_saving:
file_name = "%05d" % self.get_msg_count() + self._suffix
output_file = os.path.join(self._output_path, file_name)
self.save_image_mat_to_file(image_file=output_file)
else:
raise ValueError("not implement multiple message concatenation for Image topic")
return True
def save_image_mat_to_file(self, image_file):
        # Save image in BGR order
image_mat = self._parsed_data
if self._msg_parser.encoding == 'rgb8':
cv2.imwrite(image_file, cv2.cvtColor(image_mat, cv2.COLOR_RGB2BGR))
else:
cv2.imwrite(image_file, image_mat)
class ContiRadarParser(SensorMessageParser):
"""
class to parse apollo/sensor/radar/$(position) channels.
    saving each parsed msg separately
"""
def __init__(self, output_path, instance_saving=True, suffix='.pcd'):
super(ContiRadarParser, self).__init__(output_path, instance_saving)
self._suffix = suffix
def convert_contiobs_pb_to_array(self, obs, data_type):
arr = np.zeros(len(obs), dtype=data_type)
for i, ob in enumerate(obs):
# change timestamp to timestamp_sec
# z value is 0
# now using x, y, z, and t. later more information will be added
arr[i] = (ob.longitude_dist, ob.lateral_dist, 0, ob.header.timestamp_sec)
return arr
def make_contidata_point_cloud(self, contiobs):
"""
Make a pointcloud object from contiradar message, as conti_radar.proto.
message ContiRadarObs {
// x axis ^
// | longitude_dist
// |
// |
// |
// lateral_dist |
// y axis |
// <----------------
// ooooooooooooo //radar front surface
optional apollo.common.Header header = 1;
// longitude distance to the radar; (+) = forward; unit = m
optional double longitude_dist = 4;
// lateral distance to the radar; (+) = left; unit = m
optional double lateral_dist = 5;
.......
}
"""
md = {'version': .7,
'fields': ['x', 'y', 'z', 'timestamp'],
'count': [1, 1, 1, 1],
'width': len(contiobs),
'height': 1,
'viewpoint': [0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0],
'points': len(contiobs),
'type': ['F', 'F', 'F', 'F'],
'size': [4, 4, 4, 8],
'data': 'binary'}
typenames = []
for t, s in zip(md['type'], md['size']):
np_type = pypcd.pcd_type_to_numpy_type[(t, s)]
typenames.append(np_type)
np_dtype = np.dtype(list(zip(md['fields'], typenames)))
pc_data = self.convert_contiobs_pb_to_array(contiobs, data_type=np_dtype)
pc = pypcd.PointCloud(md, pc_data)
return pc
def save_pointcloud_meta_to_file(self, pc_meta, pcd_file):
pypcd.save_point_cloud_bin(pc_meta, pcd_file)
def _init_parser(self):
self._msg_parser = conti_radar_pb2.ContiRadar()
def parse_sensor_message(self, msg):
"""
Transform protobuf radar message to standard PCL bin_file(*.pcd).
"""
radar_data = self._msg_parser
radar_data.ParseFromString(msg.message)
self._timestamps.append(radar_data.header.timestamp_sec)
# self._timestamps.append(pointcloud.header.timestamp_sec)
self._parsed_data = self.make_contidata_point_cloud(radar_data.contiobs)
if self._instance_saving:
file_name = "%05d" % self.get_msg_count() + self._suffix
output_file = os.path.join(self._output_path, file_name)
self.save_pointcloud_meta_to_file(pc_meta=self._parsed_data, pcd_file=output_file)
else:
raise ValueError("not implement multiple message concatenation for COontiRadar topic")
# TODO(gchen-Apollo): add saint check
return True
|
{
"content_hash": "25521693f206332d17f3f8841e8fb035",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 101,
"avg_line_length": 37.0844327176781,
"alnum_prop": 0.583920313055852,
"repo_name": "ApolloAuto/apollo",
"id": "946923533f880d8fccb1a136ba63ef70d12a492c",
"size": "14840",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/tools/sensor_calibration/sensor_msg_extractor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "1922"
},
{
"name": "Batchfile",
"bytes": "791"
},
{
"name": "C",
"bytes": "66747"
},
{
"name": "C++",
"bytes": "19641274"
},
{
"name": "CMake",
"bytes": "3600"
},
{
"name": "Cuda",
"bytes": "221003"
},
{
"name": "Dockerfile",
"bytes": "8522"
},
{
"name": "GLSL",
"bytes": "7000"
},
{
"name": "HTML",
"bytes": "9768"
},
{
"name": "Handlebars",
"bytes": "991"
},
{
"name": "JavaScript",
"bytes": "461346"
},
{
"name": "Makefile",
"bytes": "6626"
},
{
"name": "Python",
"bytes": "1178328"
},
{
"name": "SCSS",
"bytes": "52149"
},
{
"name": "Shell",
"bytes": "783043"
},
{
"name": "Smarty",
"bytes": "33183"
},
{
"name": "Starlark",
"bytes": "1025633"
},
{
"name": "Vim Script",
"bytes": "161"
}
],
"symlink_target": ""
}
|
import unittest
import IECore
import Gaffer
import GafferScene
import GafferSceneTest
class FilterQueryTest( GafferSceneTest.SceneTestCase ) :
def test( self ) :
# /group
# /grid
# /gridLines
# /centerLines
# /borderLines
# /plane
# /sphere
grid = GafferScene.Grid()
plane = GafferScene.Plane()
sphere = GafferScene.Sphere()
group = GafferScene.Group()
group["in"][0].setInput( grid["out"] )
group["in"][1].setInput( plane["out"] )
group["in"][2].setInput( sphere["out"] )
pathFilter = GafferScene.PathFilter()
query = GafferScene.FilterQuery()
query["scene"].setInput( group["out"] )
query["filter"].setInput( pathFilter["out"] )
allPaths = IECore.PathMatcher()
GafferScene.SceneAlgo.matchingPaths(
IECore.PathMatcher( [ "/..." ] ),
group["out"],
allPaths
)
self.assertEqual( allPaths.size(), 8 )
for pattern in [
"/",
"/group",
"/group/grid",
"/group/grid/gridLines",
"/group/*",
"/group/grid/*",
"/group/...",
"/noMatch"
] :
pathFilter["paths"].setValue( IECore.StringVectorData( [ pattern ] ) )
for pathString in allPaths.paths() :
path = GafferScene.ScenePlug.stringToPath( pathString )
with Gaffer.Context() as c :
c["scene:path"] = path
match = pathFilter["out"].match( group["out"] )
query["location"].setValue( pathString )
self.assertEqual( query["exactMatch"].getValue(), bool( match & IECore.PathMatcher.Result.ExactMatch ) )
self.assertEqual( query["descendantMatch"].getValue(), bool( match & IECore.PathMatcher.Result.DescendantMatch ) )
self.assertEqual( query["ancestorMatch"].getValue(), bool( match & IECore.PathMatcher.Result.AncestorMatch ) )
ancestor = query["closestAncestor"].getValue()
if ancestor == "" :
self.assertFalse( match & IECore.PathMatcher.Result.ExactMatch )
self.assertFalse( match & IECore.PathMatcher.Result.AncestorMatch )
else :
ancestor = GafferScene.ScenePlug.stringToPath( ancestor )
with Gaffer.Context() as c :
c["scene:path"] = ancestor
self.assertTrue( pathFilter["out"].match( group["out"] ) & IECore.PathMatcher.Result.ExactMatch )
for i in range( len( ancestor ), len( path ) ) :
c["scene:path"] = path[:i+1]
self.assertFalse( pathFilter["out"].match( group["out"] ) & IECore.PathMatcher.Result.ExactMatch )
def testEmptyLocation( self ) :
plane = GafferScene.Plane()
plane["sets"].setValue( "test" )
setFilter = GafferScene.SetFilter()
setFilter["setExpression"].setValue( "test" )
query = GafferScene.FilterQuery()
query["scene"].setInput( plane["out"] )
query["filter"].setInput( setFilter["out"] )
self.assertEqual( query["exactMatch"].getValue(), False )
self.assertEqual( query["descendantMatch"].getValue(), False )
self.assertEqual( query["ancestorMatch"].getValue(), False )
self.assertEqual( query["closestAncestor"].getValue(), "" )
def testNonExistentLocation( self ) :
plane = GafferScene.Plane()
plane["sets"].setValue( "test" )
setFilter = GafferScene.SetFilter()
setFilter["setExpression"].setValue( "test" )
query = GafferScene.FilterQuery()
query["scene"].setInput( plane["out"] )
query["filter"].setInput( setFilter["out"] )
query["location"].setValue( "/sphere" )
self.assertEqual( query["exactMatch"].getValue(), False )
self.assertEqual( query["descendantMatch"].getValue(), False )
self.assertEqual( query["ancestorMatch"].getValue(), False )
self.assertEqual( query["closestAncestor"].getValue(), "" )
def testNonExistentLocationWithAncestors( self ) :
plane = GafferScene.Plane()
plane["sets"].setValue( "test" )
setFilter = GafferScene.SetFilter()
setFilter["setExpression"].setValue( "test" )
query = GafferScene.FilterQuery()
query["scene"].setInput( plane["out"] )
query["filter"].setInput( setFilter["out"] )
query["location"].setValue( "/plane/this/does/not/exist" )
self.assertEqual( query["exactMatch"].getValue(), False )
self.assertEqual( query["descendantMatch"].getValue(), False )
self.assertEqual( query["ancestorMatch"].getValue(), False )
self.assertEqual( query["closestAncestor"].getValue(), "" )
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "d1f5a15495f3e0a1126d0b1b0e3c63b4",
"timestamp": "",
"source": "github",
"line_count": 135,
"max_line_length": 118,
"avg_line_length": 31.59259259259259,
"alnum_prop": 0.6698710433763189,
"repo_name": "johnhaddon/gaffer",
"id": "d3238b84af2a63baea6571aec6734cda96365a6b",
"size": "6061",
"binary": false,
"copies": "5",
"ref": "refs/heads/main",
"path": "python/GafferSceneTest/FilterQueryTest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5790"
},
{
"name": "C",
"bytes": "61993"
},
{
"name": "C++",
"bytes": "9571062"
},
{
"name": "CMake",
"bytes": "85201"
},
{
"name": "GLSL",
"bytes": "6208"
},
{
"name": "Python",
"bytes": "10271481"
},
{
"name": "Ruby",
"bytes": "419"
},
{
"name": "Shell",
"bytes": "14389"
}
],
"symlink_target": ""
}
|
from parsers import TParsersBundle
from ling_utils import TTokenMatch
from ling_utils import CASE_UPPER, CASE_TITLE, CASE_LOWER
from ling_utils import span_tokenize_windows1251, unify_word
from segments_index import TSegmentIndexReader
from crawler import *
from collections import namedtuple
import pickle
import os
import sys
import numpy
def normalize_udc(udc_str):
udc_chain = []
for item in udc_str.split("."):
if not item.strip():
continue
try:
udc_chain += [int(item)]
except:
continue
return udc_chain
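# Sketch of the expected behaviour (illustrative UDC strings):
#   normalize_udc("621.3.049") -> [621, 3, 49]
#   normalize_udc("54x.abc")   -> []  (non-numeric components are skipped)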
def get_surname(author_str_windows1251):
words = [unify_word(match[-1].decode("windows-1251")) for match in span_tokenize_windows1251(author_str_windows1251)]
if not words:
return ""
surname = max((len(word), word) for word in words)[1]
return surname
TBook = namedtuple('TBooks', ['title', 'author', 'udc', 'year', 'pages_count', 'lib_sections'], verbose=False)
class TCustomFieldsSearchEngine(object):
def add_title(self, title, object_id):
for match in span_tokenize_windows1251(title):
token = unify_word(match[-1].decode("windows-1251"))
self.title_index.setdefault(token, []).append(object_id)
def find_title(self, title_query):
matched_objects = None
for match in span_tokenize_windows1251(title_query):
token = unify_word(match[-1].decode("windows-1251"))
if not token in self.title_index:
return []
if matched_objects == None:
matched_objects = set(self.title_index[token])
else:
matched_objects &= set(self.title_index[token])
if not matched_objects:
return []
return matched_objects
def add_year(self, year, object_id):
try:
year = int(year)
except:
return
self.year_index.setdefault(int(year), []).append(object_id)
def find_year(self, query_year, search_lower=False, search_bigger=False):
try:
query_year = int(query_year)
except:
return []
matched_objects = []
for year, objects in self.year_index.items():
if year == query_year or \
search_lower and year < query_year or \
search_bigger and year > query_year:
matched_objects += objects
return matched_objects
def add_udc(self, udc_str, object_id):
udc_array = normalize_udc(udc_str)
key = "|" + "|".join([str(number) for number in udc_array]) + "|"
if udc_array:
self.udc_index.setdefault(udc_array[0], []).append((key, object_id))
def find_udc(self, query_udc_str):
udc_array = normalize_udc(query_udc_str)
query_key = "|" + "|".join([str(number) for number in udc_array]) + "|"
if not udc_array:
return []
object_ids = []
if udc_array[0] in self.udc_index:
for key, object_id in self.udc_index[udc_array[0]]:
if key.startswith(query_key):
object_ids.append(object_id)
return object_ids
def add_author(self, author_str_windows1251, object_id):
surname = get_surname(author_str_windows1251)
if surname:
self.author_index.setdefault(surname, []).append(object_id)
def find_author(self, author_str_windows1251):
surname = get_surname(author_str_windows1251)
if surname in self.author_index:
return self.author_index[surname]
return []
def add_pages_count(self, pages_count, object_id):
try:
pages_count = int(pages_count)
self.pages_index.setdefault(pages_count, []).append(object_id)
except:
pass
def find_pages_count(self, query_page_count, search_lower=False, search_bigger=False):
try:
query_page_count = int(query_page_count)
except:
return []
matched_objects = []
for page_count, objects in self.pages_index.items():
if page_count == query_page_count or \
search_lower and page_count < query_page_count or \
search_bigger and page_count > query_page_count:
matched_objects += objects
return matched_objects
def add_lib_sections(self, lib_sections, object_id):
for lib_section in lib_sections:
self.lib_section_index.setdefault(lib_section, []).append(object_id)
def find_lib_section(self, query_lib_section):
if not query_lib_section:
return []
try:
query_lib_section = int(query_lib_section)
except:
return []
if query_lib_section in self.lib_section_index:
return self.lib_section_index[query_lib_section]
else:
return []
def __init__(self, csv_file):
self.title_index = {}
self.year_index = {}
self.udc_index = {}
self.author_index = {}
self.pages_index = {}
self.lib_section_index = {}
self.objects = {}
title, author, udc, year, pages_count, lib_sections = "", "", "", "", "", ()
for object in TCrawler().crawl_csv(csv_file):
object.object_id = int(object.object_id)
for field in object.object_fields:
key = field.field_id
value = field.field_value
if key == "year":
try:
year = int(value)
except:
year = -1
self.add_year(year, object.object_id)
elif key == "udc":
udc = value
self.add_udc(udc, object.object_id)
elif key == "pages_count":
try:
pages_count = int(value)
except:
pages_count = -1
self.add_pages_count(pages_count, object.object_id)
pass
elif key == "author":
author = value
self.add_author(author, object.object_id)
pass
elif key == "title":
title = value
self.add_title(title, object.object_id)
elif key == LIB_SECTION_FIELD:
lib_sections = tuple(value)
self.add_lib_sections(lib_sections, object.object_id)
self.objects[object.object_id] = TBook(title, author, udc, year, pages_count, lib_sections)
def process_query(self,
title="",
author="",
udc="",
year="",
year_max="",
year_min="",
pages_count="",
pages_count_max="",
pages_count_min="",
lib_section=""):
objects = []
if title:
objects += [self.find_title(title.encode("windows-1251"))]
if author:
objects += [self.find_author(author.encode("windows-1251"))]
if udc:
objects += [self.find_udc(udc)]
if year:
objects += [self.find_year(year)]
if year_max:
objects += [self.find_year(year_max, search_lower=True)]
if year_min:
objects += [self.find_year(year_min, search_bigger=True)]
if pages_count:
objects += [self.find_pages_count(pages_count)]
if pages_count_max:
objects += [self.find_pages_count(pages_count_max, search_lower=True)]
if pages_count_min:
objects += [self.find_pages_count(pages_count_min, search_bigger=True)]
if lib_section:
objects += [self.find_lib_section(lib_section)]
if not objects:
return 0
cross_product = None
for objects_set in objects:
if not objects_set:
return -1
if cross_product == None:
cross_product = set(objects_set)
else:
cross_product &= set(objects_set)
if not cross_product:
return -1
return cross_product
def find_mentions_of_author_and_title(self, query):
tokens = [unify_word(match[-1].decode("windows-1251")) \
for match in span_tokenize_windows1251(query.encode("windows-1251"))[:10]]
tokens = set(tokens)
books_scores = {}
for token in tokens:
if token in self.title_index:
for obj_id in set(self.title_index[token]):
books_scores.setdefault(obj_id, 0)
books_scores[obj_id] += 1
if token in self.author_index:
for obj_id in set(self.author_index[token]):
books_scores.setdefault(obj_id, 0)
books_scores[obj_id] += 1
import math
min_match = math.ceil(len(tokens) * 0.6)
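        # e.g. a 5-token query needs ceil(5 * 0.6) = 3 of its tokens found in
        # the title/author indexes before a book is returned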
matched_books = [(matched_tokens, book) for book, matched_tokens in books_scores.items() \
if matched_tokens >= min_match]
matched_books.sort(reverse=True)
matched_books = [book for _, book in matched_books]
return matched_books
if __name__ == "__main__":
print "start"
import sys
import datetime
    path = sys.argv[1]
print path
print "uploading"
start = datetime.datetime.now()
index = TCustomFieldsSearchEngine(path)
print "uploaded", len(index.objects), (datetime.datetime.now() - start)
print "query"
start = datetime.datetime.now()
#objects = index.process_query(author=u"юрьева")
    objects = index.find_mentions_of_author_and_title(u"нефтехимия миронов")
print len(objects), (datetime.datetime.now() - start)
if 1:
for object_id in objects:
title = index.objects[object_id].title
author = index.objects[object_id].author
udc = index.objects[object_id].udc
year = index.objects[object_id].year
pages_count = index.objects[object_id].pages_count
lib_sections = index.objects[object_id].lib_sections
print author.decode("windows-1251"), title.decode("windows-1251")
#print author.decode("windows-1251"), "||", title.decode("windows-1251")
#print author.decode("windows-1251"), "||", title.decode("windows-1251"), "||", udc, "||", year, "||", pages_count, "||", lib_sections
|
{
"content_hash": "0f94edca50a193717691813a579389b0",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 150,
"avg_line_length": 38.34628975265018,
"alnum_prop": 0.5342793955031331,
"repo_name": "mavlyutovrus/light_search",
"id": "07c7423fa0d48391c7cbc99bf0d665de4f34a66b",
"size": "10896",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/custom_fields_search_engine.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "156224"
},
{
"name": "Python",
"bytes": "77007"
},
{
"name": "Shell",
"bytes": "436"
}
],
"symlink_target": ""
}
|
import gameoflife
if __name__ == "__main__":
app = gameoflife.App()
app.mainloop()
|
{
"content_hash": "15f0e8d98a7ce613fc1ea49d5ddcd215",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 26,
"avg_line_length": 15.333333333333334,
"alnum_prop": 0.5760869565217391,
"repo_name": "deaz/game-of-life",
"id": "efe61071028163fdd0b9480ee17b8a98ac50a1d1",
"size": "116",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6780"
}
],
"symlink_target": ""
}
|
from datetime import datetime
from unittest import TestCase
from poll.models import Poll, Response
from django.contrib.auth.models import User
from rapidsms.models import Contact, Backend, Connection
from rapidsms_httprouter.router import get_router
from dateutil.relativedelta import relativedelta
class TestPolls(TestCase):
def setUp(self):
self.male_user = User.objects.create(username='fred', email='shaggy@scooby.com')
self.female_user = User.objects.create(username='scrapy', email='shaggy@scooby.com')
self.poll = Poll.objects.create(name='test poll', question='are you happy', user=self.male_user, type=Poll.TYPE_TEXT)
self.male_contact = Contact.objects.create(name='shaggy', user=self.male_user, gender='M',birthdate=datetime.now() - relativedelta(years=20))
self.female_contact = Contact.objects.create(name='dafny', user=self.female_user, gender='F',birthdate=datetime.now() - relativedelta(years=25))
self.backend = Backend.objects.create(name='scoobydoo')
self.connection_for_male = Connection.objects.create(identity='0794339344', backend=self.backend)
self.connection_for_male.contact = self.male_contact
self.connection_for_male.save()
self.connection_for_female = Connection.objects.create(identity='0794339345', backend=self.backend)
self.connection_for_female.contact = self.female_contact
self.connection_for_female.save()
self.poll.contacts.add(self.female_contact)
self.poll.contacts.add(self.male_contact)
self.poll.add_yesno_categories()
self.poll.save()
self.poll.start()
def send_message(self, connection, message):
router = get_router()
router.handle_incoming(connection.backend.name, connection.identity, message)
def test_responses_by_gender_only_for_male(self):
self.send_message(self.connection_for_male, 'yes')
yes_aggregation = [1, u"yes" ]
filtered_responses = self.poll.responses_by_gender(gender='m')
self.assertIn(yes_aggregation, filtered_responses)
def test_responses_by_gender(self):
self.send_message(self.connection_for_male, 'yes')
self.send_message(self.connection_for_female, 'No')
no_aggregation = [1, u"no" ]
filtered_responses = self.poll.responses_by_gender(gender='F')
self.assertIn(no_aggregation, filtered_responses)
def test_responses_by_gender_should_check_if_poll_is_yes_no(self):
poll = Poll.objects.create(name='test poll2', question='are you happy??', user=self.male_user, type=Poll.TYPE_TEXT)
with(self.assertRaises(AssertionError)):
poll.responses_by_gender(gender='F')
def test_responses_by_age(self):
self.send_message(self.connection_for_male,'yes')
self.send_message(self.connection_for_female,'no')
self.send_message(self.connection_for_male,'foobar')
yes_responses = [1, u"yes" ]
no_responses = [1, u"no" ]
unknown_responses = [1, u"unknown" ]
results = self.poll.responses_by_age(20, 26)
self.assertIn(yes_responses,results)
self.assertIn(no_responses,results)
self.assertIn(unknown_responses,results)
def tearDown(self):
Backend.objects.all().delete()
Connection.objects.all().delete()
Response.objects.all().delete()
Poll.objects.all().delete()
Contact.objects.all().delete()
User.objects.all().delete()
|
{
"content_hash": "d79e240ef74f2c842d5b4ce5396f48d4",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 152,
"avg_line_length": 42.204819277108435,
"alnum_prop": 0.6834142163859549,
"repo_name": "unicefuganda/edtrac",
"id": "3e4f302fa00dfa838c93cbdaa3ac3aeb709648a9",
"size": "3503",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "edtrac_project/rapidsms_polls/poll/test/test_polls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "277434"
},
{
"name": "JavaScript",
"bytes": "190346"
},
{
"name": "Python",
"bytes": "2621572"
},
{
"name": "Shell",
"bytes": "4755"
}
],
"symlink_target": ""
}
|
import sys
sys.stdout.write("qwert\nyuiop...")
|
{
"content_hash": "b27bbabddabc241cb4a266457d704e2e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 35,
"avg_line_length": 12.25,
"alnum_prop": 0.6938775510204082,
"repo_name": "val-labs/xed",
"id": "4a9fa648ee905fcc7011efb5d813bb88f38d8b88",
"size": "49",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3691"
},
{
"name": "Makefile",
"bytes": "66"
},
{
"name": "Python",
"bytes": "49"
}
],
"symlink_target": ""
}
|
import abc
from functools import wraps
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_messaging.exceptions import MessagingTimeout
from oslo_messaging.rpc import client
from stevedore import driver
from mistral import exceptions as exc
LOG = logging.getLogger(__name__)
_IMPL_CLIENT = None
_IMPL_SERVER = None
_TRANSPORT = None
def cleanup():
"""Intended to be used by tests to recreate all RPC related objects."""
global _TRANSPORT
_TRANSPORT = None
# TODO(rakhmerov): This method seems misplaced. Now we have different kinds
# of transports (oslo, kombu) and this module should not have any oslo
# specific things anymore.
def get_transport():
global _TRANSPORT
if not _TRANSPORT:
_TRANSPORT = messaging.get_rpc_transport(cfg.CONF)
return _TRANSPORT
def get_rpc_server_driver():
rpc_impl = cfg.CONF.rpc_implementation
global _IMPL_SERVER
if not _IMPL_SERVER:
_IMPL_SERVER = driver.DriverManager(
'mistral.rpc.backends',
'%s_server' % rpc_impl
).driver
return _IMPL_SERVER
def get_rpc_client_driver():
rpc_impl = cfg.CONF.rpc_implementation
global _IMPL_CLIENT
if not _IMPL_CLIENT:
_IMPL_CLIENT = driver.DriverManager(
'mistral.rpc.backends',
'%s_client' % rpc_impl
).driver
return _IMPL_CLIENT
def _wrap_exception_and_reraise(exception):
message = "%s: %s" % (exception.__class__.__name__, exception.args[0])
raise exc.MistralException(message)
def wrap_messaging_exception(method):
"""The decorator unwraps a remote error into one of the mistral exceptions.
oslo.messaging has different behavior on raising exceptions depending on
whether we use 'fake' or 'rabbit' transports. In case of 'rabbit' transport
it raises an instance of RemoteError which forwards directly to the API.
The RemoteError instance contains one of the MistralException instances
raised remotely on the RPC server side and for correct exception handling
we need to unwrap and raise the original wrapped exception.
"""
@wraps(method)
def decorator(*args, **kwargs):
try:
return method(*args, **kwargs)
except exc.MistralException:
raise
except MessagingTimeout:
timeout = cfg.CONF.rpc_response_timeout
raise exc.MistralException('This rpc call "%s" took longer than '
'configured %s seconds.' %
(method.__name__, timeout))
except (client.RemoteError, exc.KombuException, Exception) as e:
# Since we're going to transform the original exception
# we need to log it as is.
LOG.exception(
"Caught a messaging remote error."
" See details of the original exception."
)
if hasattr(e, 'exc_type') and hasattr(exc, e.exc_type):
exc_cls = getattr(exc, e.exc_type)
raise exc_cls(e.value)
_wrap_exception_and_reraise(e)
return decorator
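# Illustrative (hypothetical) use of the decorator on a client-side call:
#
#   class SomeClient(object):
#       @wrap_messaging_exception
#       def start_workflow(self, rpc_ctx, **params):
#           return self._client.sync_call(rpc_ctx, 'start_workflow', **params)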
class RPCClient(object):
def __init__(self, conf):
"""Base class for RPCClient's drivers
RPC Client is responsible for sending requests to RPC Server.
All RPC client drivers have to inherit from this class.
:param conf: Additional config provided by upper layer.
"""
self.conf = conf
@abc.abstractmethod
def sync_call(self, ctx, method, target=None, **kwargs):
"""Synchronous call of RPC method.
Blocks the thread and wait for method result.
"""
raise NotImplementedError
@abc.abstractmethod
def async_call(self, ctx, method, target=None, fanout=False, **kwargs):
"""Asynchronous call of RPC method.
        Does not block the thread; it just sends the invocation data to
        the RPC server and returns immediately without a result.
"""
raise NotImplementedError
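# A minimal sketch of a concrete client driver (hypothetical class; real
# drivers are registered under the 'mistral.rpc.backends' entry point):
#
#   class MyRPCClient(RPCClient):
#       def __init__(self, conf):
#           super(MyRPCClient, self).__init__(conf)
#           self._client = ...  # e.g. an oslo.messaging or kombu client
#       def sync_call(self, ctx, method, target=None, **kwargs):
#           return self._client.call(ctx, method, **kwargs)
#       def async_call(self, ctx, method, target=None, fanout=False, **kwargs):
#           self._client.cast(ctx, method, **kwargs)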
class RPCServer(object):
def __init__(self, conf):
"""Base class for RPCServer's drivers
        An RPC server listens for requests coming from RPC clients and
        responds to them according to the registered endpoints.
All RPC server drivers have to inherit from this class.
:param conf: Additional config provided by upper layer.
"""
self.conf = conf
@abc.abstractmethod
def register_endpoint(self, endpoint):
"""Registers a new RPC endpoint.
:param endpoint: an object containing methods which
will be used as RPC methods.
"""
raise NotImplementedError
@abc.abstractmethod
def run(self, executor='eventlet'):
"""Runs the RPC server.
:param executor: Executor used to process incoming requests. Different
implementations may support different options.
"""
raise NotImplementedError
def stop(self, graceful=False):
"""Stop the RPC server.
:param graceful: True if this method call should wait till all
internal threads are finished.
:return:
"""
# No-op by default.
pass
def wait(self):
"""Wait till all internal threads are finished."""
# No-op by default.
pass
|
{
"content_hash": "0e36f8fce231124c6f841b2a774d5d3d",
"timestamp": "",
"source": "github",
"line_count": 186,
"max_line_length": 79,
"avg_line_length": 28.951612903225808,
"alnum_prop": 0.6380687093779016,
"repo_name": "openstack/mistral",
"id": "828a49d7c4b4700c44cf1adec7add6e37cc47b39",
"size": "6049",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mistral/rpc/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2091"
},
{
"name": "Mako",
"bytes": "951"
},
{
"name": "Python",
"bytes": "2617595"
},
{
"name": "Shell",
"bytes": "26731"
}
],
"symlink_target": ""
}
|
import unittest
from streamlink.plugins.lrt import LRT
class TestPluginLRT(unittest.TestCase):
def test_can_handle_url(self):
should_match = [
"https://www.lrt.lt/mediateka/tiesiogiai/lrt-televizija",
"https://www.lrt.lt/mediateka/tiesiogiai/lrt-kultura",
"https://www.lrt.lt/mediateka/tiesiogiai/lrt-lituanica"
"https://www.lrt.lt/mediateka/irasas/1013694276/savanoriai-tures-galimybe-pamatyti-popieziu-is-arciau#wowzaplaystart=1511000&wowzaplayduration=168000"
]
for url in should_match:
self.assertTrue(LRT.can_handle_url(url))
def test_can_handle_url_negative(self):
should_not_match = [
"https://www.lrt.lt",
"https://www.youtube.com",
]
for url in should_not_match:
self.assertFalse(LRT.can_handle_url(url))
|
{
"content_hash": "7c3ba8dc5b4c3ac63836a54f036db850",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 162,
"avg_line_length": 36.291666666666664,
"alnum_prop": 0.6394948335246843,
"repo_name": "back-to/streamlink",
"id": "c7d4476c6e81de868f255ed71ae1c008ccebceea",
"size": "871",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/plugins/test_lrt.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1451380"
},
{
"name": "Shell",
"bytes": "18044"
}
],
"symlink_target": ""
}
|
import unittest
from amara.lib import testsupport
from amara.bindery import parse
from amara import tree
from xml.dom import Node
import os
import tempfile
MONTY_XML = """<monty>
<python spam="eggs">What do you mean "bleh"</python>
<python ministry="abuse">But I was looking for argument</python>
</monty>"""
SILLY_XML = """<parent>
<element name="a">A</element>
<element name="b">B</element>
</parent>"""
SILLY_NS_XML = """<a:parent xmlns:a="urn:bogus:a" xmlns:b="urn:bogus:b">
<b:sillywrap>
<a:element name="a">A</a:element>
<a:element name="b">B</a:element>
</b:sillywrap>
</a:parent>"""
NS_XML = """<doc xmlns:a="urn:bogus:a" xmlns:b="urn:bogus:b">
<a:monty/>
<b:python/>
</doc>"""
SANE_DEFAULT_XML = """<doc xmlns="urn:bogus:a">
<monty/>
<python/>
</doc>"""
SANE_DEFAULT_XML_PREFIXES = {u'x': u'urn:bogus:a'}
class Test_sane_default_1(unittest.TestCase):
"""Testing a sane document using default NS"""
def test_specify_ns(self):
"""Parse with string"""
doc = parse(SANE_DEFAULT_XML, prefixes=SANE_DEFAULT_XML_PREFIXES)
#print doc.xml_namespaces
self.assertEqual(len(list(doc.xml_select(u'//x:monty'))), 1)
return
def test_attr_assignment(self):
doc = parse(SANE_DEFAULT_XML, prefixes=SANE_DEFAULT_XML_PREFIXES)
monty = doc.doc.monty
# Create attribute node
attr_node = tree.attribute(u'urn:bogus:a', 'setitem', 'value')
monty[u'urn:bogus:a', 'setitem'] = attr_node
self.assertEqual(monty.xml_attributes[(u'urn:bogus:a', u'setitem')],
'value')
# Check for mismatched namespace
attr_node = tree.attribute(u'urn:bogus:a', 'setitem2', 'value')
def f():
monty[u'urn:wrong-value', 'setitem2'] = attr_node
self.assertRaises(ValueError, f)
# Check for mismatched local name
def f():
monty[u'urn:bogus:a', 'setitem'] = attr_node
self.assertRaises(ValueError, f)
# Test with no namespace supplied on node.
attr_node = tree.attribute(None, 'setitem3', 'value')
monty[u'urn:bogus:a', 'setitem3'] = attr_node
self.assertEqual(monty.xml_attributes[(u'urn:bogus:a', u'setitem3')],
'value')
# Test with no namespace supplied in key.
attr_node = tree.attribute(u'urn:bogus:a', 'setitem4', 'value')
monty[None, 'setitem4'] = attr_node
self.assertEqual(monty.xml_attributes[(u'urn:bogus:a', u'setitem4')],
'value')
# Test with no namespace supplied at all.
attr_node = tree.attribute(None, 'setitem5', 'value')
monty[None, 'setitem5'] = attr_node
self.assertEqual(monty.xml_attributes[(u'urn:bogus:a', u'setitem5')],
'value')
if __name__ == '__main__':
testsupport.test_main()
|
{
"content_hash": "e81dd6749b3cd757c940a1dad682e9b2",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 78,
"avg_line_length": 32.550561797752806,
"alnum_prop": 0.5999309630652399,
"repo_name": "zepheira/amara",
"id": "e49fdf90cd2675a99fe26c420d4c70454586dc47",
"size": "2923",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/bindery/test_namespaces.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "1830216"
},
{
"name": "C++",
"bytes": "82201"
},
{
"name": "GLSL",
"bytes": "5081"
},
{
"name": "HTML",
"bytes": "578831"
},
{
"name": "JavaScript",
"bytes": "18734"
},
{
"name": "Logos",
"bytes": "175"
},
{
"name": "Objective-C",
"bytes": "26041"
},
{
"name": "Python",
"bytes": "1507578"
},
{
"name": "Shell",
"bytes": "2497"
},
{
"name": "XSLT",
"bytes": "398316"
}
],
"symlink_target": ""
}
|
"""Audit model."""
from ggrc import db
from ggrc.models.deferred import deferred
from ggrc.models.mixins import (
Timeboxed, Noted, Described, Hyperlinked, WithContact,
Titled, Slugged, CustomAttributable
)
from ggrc.models.mixins import clonable
from ggrc.models.relationship import Relatable
from ggrc.models.object_person import Personable
from ggrc.models.context import HasOwnContext
from ggrc.models.reflection import AttributeInfo
from ggrc.models.reflection import PublishOnly
from ggrc.models.program import Program
from ggrc.models.person import Person
from ggrc.models.snapshot import Snapshotable
class Audit(Snapshotable, clonable.Clonable,
CustomAttributable, Personable, HasOwnContext, Relatable,
Timeboxed, Noted, Described, Hyperlinked, WithContact, Titled,
Slugged, db.Model):
"""Audit model."""
__tablename__ = 'audits'
_slug_uniqueness = False
VALID_STATES = (
u'Planned', u'In Progress', u'Manager Review',
u'Ready for External Review', u'Completed'
)
CLONEABLE_CHILDREN = {"AssessmentTemplate"}
report_start_date = deferred(db.Column(db.Date), 'Audit')
report_end_date = deferred(db.Column(db.Date), 'Audit')
audit_firm_id = deferred(
db.Column(db.Integer, db.ForeignKey('org_groups.id')), 'Audit')
audit_firm = db.relationship('OrgGroup', uselist=False)
# TODO: this should be stateful mixin
status = deferred(db.Column(db.Enum(*VALID_STATES), nullable=False),
'Audit')
gdrive_evidence_folder = deferred(db.Column(db.String), 'Audit')
program_id = deferred(
db.Column(db.Integer, db.ForeignKey('programs.id'), nullable=False),
'Audit')
requests = db.relationship(
'Request', backref='audit', cascade='all, delete-orphan')
audit_objects = db.relationship(
'AuditObject', backref='audit', cascade='all, delete-orphan')
object_type = db.Column(
db.String(length=250), nullable=False, default='Control')
_publish_attrs = [
'report_start_date',
'report_end_date',
'audit_firm',
'status',
'gdrive_evidence_folder',
'program',
'requests',
'object_type',
PublishOnly('audit_objects')
]
_sanitize_html = [
'gdrive_evidence_folder',
'description',
]
_include_links = []
_aliases = {
"program": {
"display_name": "Program",
"filter_by": "_filter_by_program",
"mandatory": True,
},
"user_role:Auditor": {
"display_name": "Auditors",
"type": AttributeInfo.Type.USER_ROLE,
"filter_by": "_filter_by_auditor",
},
"status": "Status",
"start_date": "Planned Start Date",
"end_date": "Planned End Date",
"report_start_date": "Planned Report Period from",
"report_end_date": "Planned Report Period to",
"contact": {
"display_name": "Internal Audit Lead",
"mandatory": True,
"filter_by": "_filter_by_contact",
},
"secondary_contact": None,
"notes": None,
"url": None,
"reference_url": None,
}
def _clone(self, source_object):
"""Clone audit and all relevant attributes.
Keeps the internals of actual audit cloning and everything that is related
to audit itself (auditors, audit firm, context setting,
custom attribute values, etc.)
"""
from ggrc_basic_permissions import create_audit_context
data = {
"title": source_object.generate_attribute("title"),
"description": source_object.description,
"audit_firm": source_object.audit_firm,
"start_date": source_object.start_date,
"end_date": source_object.end_date,
"program": source_object.program,
"status": source_object.VALID_STATES[0],
"report_start_date": source_object.report_start_date,
"report_end_date": source_object.report_end_date,
"contact": source_object.contact
}
self.update_attrs(data)
db.session.flush()
create_audit_context(self)
self._clone_auditors(source_object)
self.clone_custom_attribute_values(source_object)
def _clone_auditors(self, audit):
"""Clone auditors of specified audit.
Args:
audit: Audit instance
"""
from ggrc_basic_permissions.models import Role, UserRole
role = Role.query.filter_by(name="Auditor").first()
auditors = [ur.person for ur in UserRole.query.filter_by(
role=role, context=audit.context).all()]
for auditor in auditors:
user_role = UserRole(
context=self.context,
person=auditor,
role=role
)
db.session.add(user_role)
db.session.flush()
def clone(self, source_id, mapped_objects=None):
"""Clone audit with specified whitelisted children.
Children that can be cloned should be specified in CLONEABLE_CHILDREN.
Args:
mapped_objects: A list of related objects that should also be copied and
linked to a new audit.
"""
if not mapped_objects:
mapped_objects = []
source_object = Audit.query.get(source_id)
self._clone(source_object)
if any(mapped_objects):
related_children = source_object.related_objects(mapped_objects)
for obj in related_children:
obj.clone(self)
@classmethod
def _filter_by_program(cls, predicate):
return Program.query.filter(
(Program.id == Audit.program_id) &
(predicate(Program.slug) | predicate(Program.title))
).exists()
@classmethod
def _filter_by_auditor(cls, predicate):
from ggrc_basic_permissions.models import Role, UserRole
return UserRole.query.join(Role, Person).filter(
(Role.name == "Auditor") &
(UserRole.context_id == cls.context_id) &
(predicate(Person.name) | predicate(Person.email))
).exists()
@classmethod
def eager_query(cls):
from sqlalchemy import orm
query = super(Audit, cls).eager_query()
return query.options(
orm.joinedload('program'),
orm.subqueryload('requests'),
orm.subqueryload('object_people').joinedload('person'),
orm.subqueryload('audit_objects'),
)
|
{
"content_hash": "79fcece37be12082ccdf57472d313354",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 78,
"avg_line_length": 30.984924623115578,
"alnum_prop": 0.6490431397988972,
"repo_name": "josthkko/ggrc-core",
"id": "8eab0015b20d187d9ba54d26a6b840a6be26e590",
"size": "6279",
"binary": false,
"copies": "3",
"ref": "refs/heads/develop",
"path": "src/ggrc/models/audit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "163629"
},
{
"name": "Cucumber",
"bytes": "136321"
},
{
"name": "HTML",
"bytes": "1057288"
},
{
"name": "JavaScript",
"bytes": "1492054"
},
{
"name": "Makefile",
"bytes": "6161"
},
{
"name": "Mako",
"bytes": "2178"
},
{
"name": "Python",
"bytes": "2148568"
},
{
"name": "Shell",
"bytes": "29929"
}
],
"symlink_target": ""
}
|
"""The AirNow integration."""
import datetime
import logging
from aiohttp.client_exceptions import ClientConnectorError
from pyairnow import WebServiceAPI
from pyairnow.conv import aqi_to_concentration
from pyairnow.errors import AirNowError
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_API_KEY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
ATTR_API_AQI,
ATTR_API_AQI_DESCRIPTION,
ATTR_API_AQI_LEVEL,
ATTR_API_AQI_PARAM,
ATTR_API_CAT_DESCRIPTION,
ATTR_API_CAT_LEVEL,
ATTR_API_CATEGORY,
ATTR_API_PM25,
ATTR_API_POLLUTANT,
ATTR_API_REPORT_DATE,
ATTR_API_REPORT_HOUR,
ATTR_API_STATE,
ATTR_API_STATION,
ATTR_API_STATION_LATITUDE,
ATTR_API_STATION_LONGITUDE,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = [Platform.SENSOR]
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up AirNow from a config entry."""
api_key = entry.data[CONF_API_KEY]
latitude = entry.data[CONF_LATITUDE]
longitude = entry.data[CONF_LONGITUDE]
distance = entry.data[CONF_RADIUS]
# Reports are published hourly but update twice per hour
update_interval = datetime.timedelta(minutes=30)
# Setup the Coordinator
session = async_get_clientsession(hass)
coordinator = AirNowDataUpdateCoordinator(
hass, session, api_key, latitude, longitude, distance, update_interval
)
# Sync with Coordinator
await coordinator.async_config_entry_first_refresh()
# Store Entity and Initialize Platforms
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
await hass.config_entries.async_forward_entry_setups(entry, PLATFORMS)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_unload_platforms(entry, PLATFORMS)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class AirNowDataUpdateCoordinator(DataUpdateCoordinator):
"""Define an object to hold Airly data."""
def __init__(
self, hass, session, api_key, latitude, longitude, distance, update_interval
):
"""Initialize."""
self.latitude = latitude
self.longitude = longitude
self.distance = distance
self.airnow = WebServiceAPI(api_key, session=session)
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=update_interval)
async def _async_update_data(self):
"""Update data via library."""
data = {}
try:
obs = await self.airnow.observations.latLong(
self.latitude,
self.longitude,
distance=self.distance,
)
except (AirNowError, ClientConnectorError) as error:
raise UpdateFailed(error) from error
if not obs:
raise UpdateFailed("No data was returned from AirNow")
max_aqi = 0
max_aqi_level = 0
max_aqi_desc = ""
max_aqi_poll = ""
for obv in obs:
# Convert AQIs to Concentration
pollutant = obv[ATTR_API_AQI_PARAM]
concentration = aqi_to_concentration(obv[ATTR_API_AQI], pollutant)
data[obv[ATTR_API_AQI_PARAM]] = concentration
# Overall AQI is the max of all pollutant AQIs
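            # e.g. readings of PM2.5 at AQI 42 and O3 at AQI 51 yield an
            # overall AQI of 51 attributed to O3 (illustrative values)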
if obv[ATTR_API_AQI] > max_aqi:
max_aqi = obv[ATTR_API_AQI]
max_aqi_level = obv[ATTR_API_CATEGORY][ATTR_API_CAT_LEVEL]
max_aqi_desc = obv[ATTR_API_CATEGORY][ATTR_API_CAT_DESCRIPTION]
max_aqi_poll = pollutant
# Copy other data from PM2.5 Value
if obv[ATTR_API_AQI_PARAM] == ATTR_API_PM25:
# Copy Report Details
data[ATTR_API_REPORT_DATE] = obv[ATTR_API_REPORT_DATE]
data[ATTR_API_REPORT_HOUR] = obv[ATTR_API_REPORT_HOUR]
# Copy Station Details
data[ATTR_API_STATE] = obv[ATTR_API_STATE]
data[ATTR_API_STATION] = obv[ATTR_API_STATION]
data[ATTR_API_STATION_LATITUDE] = obv[ATTR_API_STATION_LATITUDE]
data[ATTR_API_STATION_LONGITUDE] = obv[ATTR_API_STATION_LONGITUDE]
# Store Overall AQI
data[ATTR_API_AQI] = max_aqi
data[ATTR_API_AQI_LEVEL] = max_aqi_level
data[ATTR_API_AQI_DESCRIPTION] = max_aqi_desc
data[ATTR_API_POLLUTANT] = max_aqi_poll
return data
|
{
"content_hash": "63cc4092b6f077afb8ac1f614f4cf257",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 88,
"avg_line_length": 32.4496644295302,
"alnum_prop": 0.647569803516029,
"repo_name": "nkgilley/home-assistant",
"id": "7c26cded4deaf87d2f306b4df285e7a67fcc76d9",
"size": "4835",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/airnow/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
from datetime import datetime, timedelta
from django.core.management.base import BaseCommand
from wagtailstreamforms.models import FormSubmission
class Command(BaseCommand):
help = "Deletes form submissions older than the provided number of days"
def add_arguments(self, parser):
parser.add_argument("days_to_keep", type=int)
def get_queryset(self, date):
return FormSubmission.objects.filter(submit_time__lt=date)
def handle(self, *args, **options):
keep_from_date = datetime.today().date() - timedelta(
days=options["days_to_keep"]
)
queryset = self.get_queryset(keep_from_date)
count = queryset.count()
queryset.delete()
msg = "Successfully deleted %s form submissions prior to %s" % (
count,
keep_from_date,
)
self.stdout.write(self.style.SUCCESS(msg))
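# Example invocation (illustrative days_to_keep value): keep only the last
# 30 days of submissions:
#
#   python manage.py prunesubmissions 30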
|
{
"content_hash": "95078f770c0c63b2e6e143e946af8ae9",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 76,
"avg_line_length": 29,
"alnum_prop": 0.6518353726362626,
"repo_name": "AccentDesign/wagtailstreamforms",
"id": "3d778c995900fdfe386f6819c7554874769cabc1",
"size": "899",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wagtailstreamforms/management/commands/prunesubmissions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "690"
},
{
"name": "HTML",
"bytes": "14735"
},
{
"name": "JavaScript",
"bytes": "213"
},
{
"name": "Makefile",
"bytes": "438"
},
{
"name": "Python",
"bytes": "189375"
},
{
"name": "SCSS",
"bytes": "2257"
},
{
"name": "Shell",
"bytes": "559"
}
],
"symlink_target": ""
}
|
import sys
import traceback
try:
from django.conf import settings
from pymysql import install_as_MySQLdb
install_as_MySQLdb()
settings.configure(
DEBUG=True,
USE_TZ=True,
DATABASES={
"default": {
"ENGINE": "django.db.backends.sqlite3",
},
},
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
},
],
ROOT_URLCONF="watchman.urls",
INSTALLED_APPS=[
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sites",
"watchman",
],
SITE_ID=1,
NOSE_ARGS=['-s'],
)
from django_nose import NoseTestSuiteRunner
except ImportError:
traceback.print_exc()
raise RuntimeError("To fix this error, run: pip install django -r requirements-test.txt")
def run_tests(*test_args):
if not test_args:
test_args = ['tests']
# Run tests
test_runner = NoseTestSuiteRunner(verbosity=1)
failures = test_runner.run_tests(test_args)
if failures:
sys.exit(failures)
if __name__ == '__main__':
run_tests(*sys.argv[1:])
|
{
"content_hash": "9759907db22cb4db6556065632769c66",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 93,
"avg_line_length": 22.105263157894736,
"alnum_prop": 0.5507936507936508,
"repo_name": "ulope/django-watchman",
"id": "f9143a809f3790dd3959e1e102bb79193cad31ec",
"size": "1260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "runtests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "4939"
},
{
"name": "Makefile",
"bytes": "1341"
},
{
"name": "Python",
"bytes": "45214"
}
],
"symlink_target": ""
}
|
./rpm_server.py --port 1234 --output-dir /tmp --create-repo ls
|
{
"content_hash": "c9725c70bd02f3701e3ad47ef2a0699c",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 62,
"avg_line_length": 63,
"alnum_prop": 0.6825396825396826,
"repo_name": "Guavus/rpm-uploader",
"id": "272863d2380259aff20995734ba8e791093cc3f0",
"size": "73",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpm_uploader/test/test_server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4389"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse
from common.templates import render_template
def list(request):
# TODO: implement
pass
def register(request):
if 'username' in request.POST:
User.objects.create_user(request.POST['username'], request.POST['email'],
request.POST['password'])
template_name = 'users/success.tpl'
context = {'mode': 'register'}
else:
template_name = 'users/register.tpl'
context = {}
return HttpResponse(render_template(template_name, request, context))
def connect(request):
if 'username' in request.POST:
user = authenticate(username=request.POST['username'],
password=request.POST['password'])
if user is not None:
if user.is_active:
login(request, user)
template_name = 'users/success.tpl'
context = {'mode': 'connect'}
else:
template_name = 'users/fail.tpl'
context = {'mode': 'connect', 'why': 'disabled'}
else:
template_name = 'users/fail.tpl'
context = {'mode': 'connect', 'why': 'incorrect'}
else:
template_name = 'users/connect.tpl'
context = {}
return HttpResponse(render_template(template_name, request, context))
def disconnect(request):
logout(request)
template_name = 'users/success.tpl'
context = {'mode': 'disconnect'}
return HttpResponse(render_template(template_name, request, context))
def profile(request, username):
# TODO: implement
pass
|
{
"content_hash": "e3f60ac095d5722bd8ddcba52827072c",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 81,
"avg_line_length": 33.6078431372549,
"alnum_prop": 0.6067677946324388,
"repo_name": "ProgVal/ProgVal.42",
"id": "6875867aa81b3eeb595b48534c2214f9c111ef88",
"size": "1714",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "users/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "3691"
}
],
"symlink_target": ""
}
|
from django.utils.translation import ugettext_lazy as _
from livesettings import *
from tax.config import TAX_MODULE
TAX_MODULE.add_choice(('tax.modules.percent', _('Percent Tax')))
TAX_GROUP = config_get_group('TAX')
config_register(
DecimalValue(TAX_GROUP,
'PERCENT',
description=_("Percent tax"),
requires=TAX_MODULE,
requiresvalue='tax.modules.percent',
default="0")
)
config_register(
BooleanValue(TAX_GROUP,
'TAX_SHIPPING_PERCENT',
description=_("Tax Shipping?"),
requires=TAX_MODULE,
requiresvalue='tax.modules.percent',
default=False)
)
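# The registered setting is read back elsewhere via livesettings, roughly:
#
#   from livesettings import config_value
#   percent = config_value('TAX', 'PERCENT')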
|
{
"content_hash": "be1724376b5667e7882102d1aced1bc1",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 64,
"avg_line_length": 26.5,
"alnum_prop": 0.6540880503144654,
"repo_name": "jordotech/satchmofork",
"id": "2ba93d53162f8ad3e4ed90914b9e236d51c44ae5",
"size": "636",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "satchmo/apps/tax/modules/percent/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "15868"
},
{
"name": "HTML",
"bytes": "209299"
},
{
"name": "JavaScript",
"bytes": "73019"
},
{
"name": "Python",
"bytes": "2115973"
}
],
"symlink_target": ""
}
|
import os
import commands
def pilot_killer():
user=os.getenv('USER')
sandbox = os.listdir('/home/%s/radical.pilot.sandbox/'%user)
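    # Each 'rp.*' directory is a pilot sandbox: read the bootstrapper PID from
    # its agent log and send it SIGTERM (15); pilots that are already gone are
    # simply skipped.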
for folder in sandbox:
if folder.startswith('rp.'):
bootstrapper3_file=open('/home/%s/radical.pilot.sandbox/%s/agent_0.bootstrap_3.log'%(user,folder),'r')
bootstrapper3_lines=bootstrapper3_file.readlines()
bootstrapper3_pid=int(bootstrapper3_lines[1].split('pid:')[1])
try:
os.kill(bootstrapper3_pid,15)
except:
pass
if __name__ == "__main__":
print 'Killing all active pilots'
pilot_killer()
|
{
"content_hash": "1cf9e91d390c12b8dc238888664a0eb7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 114,
"avg_line_length": 28.217391304347824,
"alnum_prop": 0.5947611710323575,
"repo_name": "radical-cybertools/supercomputing2015-tutorial",
"id": "0124c1c6a3a4490ded099e8e5193fa23d53cc0d4",
"size": "649",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "02_pilot/pilot_killer.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6208"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models
from django.utils.translation import ugettext_lazy as _
from shop.models import order
class Order(order.BaseOrder):
"""Default materialized model for Order"""
shipping_address_text = models.TextField(_("Shipping address"), blank=True, null=True,
help_text=_("Shipping address at the moment of purchase."))
billing_address_text = models.TextField(_("Billing address"), blank=True, null=True,
help_text=_("Billing address at the moment of purchase."))
@property
def identifier(self):
return "{}-{}".format(self.created_at.year, self.pk)
def populate_from_cart(self, cart, request):
self.shipping_address_text = cart.shipping_address.as_text()
self.billing_address_text = cart.shipping_address.as_text()
super(Order, self).populate_from_cart(cart, request)
|
{
"content_hash": "65127baa6f6bfcc49cecc2c34c1d2764",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 90,
"avg_line_length": 42.80952380952381,
"alnum_prop": 0.699666295884316,
"repo_name": "schacki/django-shop",
"id": "23be23a758a6427219fe141d9d2f7b9ad4a06648",
"size": "923",
"binary": false,
"copies": "1",
"ref": "refs/heads/0.3.0.dev",
"path": "shop/models/defaults/order.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3556"
},
{
"name": "HTML",
"bytes": "39224"
},
{
"name": "JavaScript",
"bytes": "37017"
},
{
"name": "Python",
"bytes": "234644"
},
{
"name": "Shell",
"bytes": "916"
}
],
"symlink_target": ""
}
|
import math
import os
import string
import sys
from pysqlite2 import dbapi2 as sqlite
from scipy.ndimage import gaussian_filter
# My own modules
import main
import fits
import gildas
import astrolyze.functions.constants as const
class MiriadMap(main.Map):
def __init__(self, map_name):
r"""
        Call the Map parent class and check that the file really is a
        miriad map.
"""
main.Map.__init__(self, map_name)
if self.dataFormat is not None:
print 'Exiting: not the right format'
            print self.resolution
sys.exit()
def toFits(self):
r"""
Converts the actual map to a Fits map.
Returns
-------
FitsMap Object.
Examples
--------
With:
>>> map = miriadMap('M33_MIPS_24mum_JyB_5')
>>> map = map.toFits()
it is possible to continue working with the Fits map, using
:class:`maps.fits.FitsMap` class.
"""
os.system('rm ' + str(self.returnName(dataFormat='fits')))
print 'rm ' + str(self.returnName(dataFormat='fits'))
string = ('fits in=' + str(self.map_name) + ' out=' +
str(self.returnName(dataFormat='fits')) + ' op=xyout')
print string
os.system(string)
self.fitsName = str(self.returnName()) + '.fits'
return fits.FitsMap(str(self.returnName()) + '.fits')
def toGildas(self):
r"""
Converts the actual map to a Gildas map.
Returns
-------
GildasMap Object.
Examples
--------
With:
>>> map = miriadMap('M33_MIPS_24mum_JyB_5')
>>> map = map.toGildas()
        it is possible to continue working with the Gildas map, using
:class:`maps.gildas.GildasMap` class.
"""
self.toFits()
os.system('rm ' + str(self.returnName()) + '.gdf')
self.convFile = open('temp.greg', 'w')
self.convFile.write('fits ' + str(self.returnName(dataFormat='fits'))
+ ' to ' + str(self.returnName(dataFormat='gdf'))
+ '\nexit\n')
self.convFile.close()
os.system('greg -nw @temp.greg')
self.gildasName = str(self.map_name) + '.gdf'
os.system('rm temp.greg')
return gildas.GildasMap(self.gildasName)
def toMiriad(self):
r"""
        Copies the actual map, changing the name so that changes in the
        keywords are taken into account.
Returns
-------
MiriadMap Object.
Examples
--------
With:
>>> map = miriadMap('M33_MIPS_24mum_JyB_5')
>>> map = map.toMiriad()
it is possible to continue working with the Miriad map, using
        :class:`maps.miriad.MiriadMap` class.
"""
os.system('cp -rf ' + str(self.map_name) + ' ' +
str(self.returnName()))
print 'cp -rf ' + str(self.map_name) + ' ' + str(self.returnName())
self.map_name = self.returnName()
return self
def smooth(self, new_resolution, old_resolution=None, scale=''):
r"""
Smoothes a miriad map to the new resolution.
Parameters
----------
new_resolution : float or list
            The resolution of the smoothed image.
Can be a:
* float: Output beam has same major and minor axis [arcsec] and
the position angle (PA) [degrees] is 0.
* A list with two entries:
The major and minor axis. PA is 0.
E.g. [major_axis, minor_axis ]
* A list with three entries:
[major_axis, minor_axis, PA]
old_resolution : float
If None the self.resolution information is taken into account.
Otherwise, it is assumed that old_resolution is the actual
resolution of the map.
scale : string
If unset (scale=''), the miriad function will attempt to make the
units of the smoothed image be Jy/beam for Gaussian convolution. If
            ``0.0``, then the convolution integral is scaled (multiplied) by the
inverse of the volume of the convolving function. Otherwise, the
integral is scaled by "scale"
Returns
-------
MiriadMap : object
The smoothed image.
Notes
-----
The function used to calculate the fwhm (\Omega) of the convolving
Gaussian for both major and minor axis is:
.. math::
\Omega_{\rm convolve} = \sqrt{\Omega_{\rm new}^2 -
\Omega_{\rm old}^2}
"""
# Parsing the resolution string.
try:
new_major = new_resolution[0]
new_minor = new_resolution[1]
new_pa = new_resolution[2]
        except (IndexError, TypeError):
try:
new_major = new_resolution[0]
new_minor = new_resolution[1]
new_pa = 0
            except (IndexError, TypeError):
new_major = new_resolution
new_minor = new_resolution
new_pa = 0
if old_resolution is None:
_old_major = self.resolution[0]
_old_minor = self.resolution[1]
pa = self.resolution[2]
if old_resolution is not None:
if old_resolution is list:
if len(old_resolution) == 2:
_old_major = old_resolution[0]
_old_minor = old_resolution[1]
pa = 0
if len(old_resolution) == 3:
_old_major = old_resolution[0]
_old_minor = old_resolution[1]
pa = old_resolution[2]
if old_resolution is not list:
_old_major = old_resolution
_old_minor = old_resolution
pa = 0
if ((float(_old_major) > float(new_major) or float(_old_minor) >
float(new_minor))):
print 'Error: Old Resolution bigger than new one!'
# calculate the fwhm for the convolving gaussian.
_fwhm_major = math.sqrt(float(new_major) ** 2 -
float(_old_major) ** 2)
_fwhm_minor = math.sqrt(float(new_minor) ** 2 -
float(_old_minor) ** 2)
_smoothed_map_name = self.returnName(resolution=[float(new_major),
float(new_minor),
new_pa])
os.system('rm -rf ' + _smoothed_map_name)
if scale != '':
executeString = ('smooth in=' + str(self.map_name) + ' '
'out=' + _smoothed_map_name + ' '
'fwhm=' + str('%.2f' % (_fwhm_major)) + ', ' +
str('%.2f' % (_fwhm_minor)) + ' pa=' + str(pa) +
' scale=' + str(scale))
else:
            executeString = ('smooth in=' + str(self.map_name) + ' '
                             'out=' + _smoothed_map_name + ' '
                             'fwhm=' + str('%.2f' % (_fwhm_major)) + ', ' +
                             str('%.2f' % (_fwhm_minor)) + ' pa=' + str(pa))
print executeString
os.system(executeString)
return MiriadMap(_smoothed_map_name)
def _moment(self, iN='', region='', out='', mom='0', axis='',
clip='', rngmsk='', raw=''):
'''
Wrap around MIRIADs moment task.
keywords are as in miriad.
By default (-> if you give no arguments to the function)
it creates the zeroth moment of the map
'''
fileout = open('miriad.out', 'a')
string = 'moment '
if iN == '':
iN = self.map_name
string += 'in=' + str(iN) + ' '
if region != '':
string += 'region=' + str(region) + ' '
if out == '':
self.newComments = []
if 'cube' in self.comments:
print 'yes1'
for i in self.comments:
if str(i) == 'cube':
print 'yes1'
self.newComments += ['mom' + str(mom)]
else:
self.newComments += [str(i)]
self.comments = self.newComments
else:
self.comments += ['mom' + str(mom)]
print self.comments
out = self.returnName()
string += 'out=' + str(out) + ' '
string += 'mom=' + str(mom) + ' '
if axis != '':
string += 'axis=' + str(axis) + ' '
if clip != '':
string += 'clip=' + str(clip) + ' '
if rngmsk != '':
string += 'rngmsk=' + str(rngmsk) + ' '
if raw != '':
string += 'raw=' + str(raw) + ' '
os.system('rm -rf ' + str(self.returnName()))
print string
os.system(string)
fileout.write(string + '\n')
self.map_name = out
fileout.close()
def regrid(self, iN='', out='', axes='1,2', tin='', desc='',
options='', project='', rotate='', tol=''):
fileout = open('miriad.out', 'a')
string = 'regrid '
if iN == '':
iN = self.map_name
string += 'in=' + str(iN) + ' '
if out == '':
self.comments += ['regrid']
out = self.returnName()
string += 'out=' + str(out) + ' '
if axes != '':
string += 'axes=' + str(axes) + ' '
if tin != '':
string += 'tin=' + str(tin) + ' '
if desc != '':
string += 'desc=' + str(desc) + ' '
if options != '':
string += 'options=' + str(options) + ' '
if project != '':
string += 'project=' + str(project) + ' '
if rotate != '':
string += 'rotate=' + str(rotate) + ' '
if tol != '':
string += 'tol=' + str(tol) + ' '
os.system('rm -rf ' + str(self.returnName()))
print string
os.system(string)
fileout.write(string + '\n')
self.map_name = out
fileout.close()
def ellipseMask(self, pa, incline, radius, coord, out,
pix_or_coord='coord', logic='lt'):
tempFitsMap = self.toFits()
xy = tempFitsMap.sky2xy(coord)
x0 = str(int(floor(float(xy[0]))))
y0 = str(int(floor(float(xy[1]))))
os.system('rm -rf ' + out)
print '#################'
print self.inclination
print self.pa
print '#################'
sineCosArg = str(float(2 * math.pi / 360 * (-90 + float(self.pa))))
inclinationRadian = str(2 * math.pi / 360 * self.inclination)
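        # Assemble a MIRIAD 'maths' mask: pixel coordinates are rotated by the
        # position angle and stretched by 1/cos(inclination) along the minor
        # axis, and the resulting deprojected radius is compared (per `logic`)
        # against `radius` (apparently in arcminutes, hence the factor 60)
        # converted to pixels via CDELT1.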
os.system('maths \'exp=<' + self.map_name + '>\' \''
'mask=sqrt((((x-' + x0 + ')*cos(' + sineCosArg + '))-'
'((y-' + y0 + ')*sin(' + sineCosArg + ')))**2 + '
'(((x-' + x0 + ')*sin(' + sineCosArg + '))+'
'((y-' + y0 + ')*cos(' + sineCosArg + ')))**2/'
'(cos(' + inclinationRadian + ')**2)).' + logic + '.' +
str(radius * 60 /
math.sqrt((float(tempFitsMap.header['CDELT1']) /
(1. / 60 / 60)) ** 2)) + '\' '
'out=' + out + ' xrange=0,' +
str(tempFitsMap.header['NAXIS1']) + ' '
'yrange=0,' + str(tempFitsMap.header['NAXIS2']))
self.map_name = out
def _regridMiriadToArcsec(self, value, JyB_KkmS='KkmS'):
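        # Regrid the map onto square pixels of `value` arcseconds, using the
        # geometry read from the FITS header; for 'KkmS' maps the result is
        # additionally divided by the pixel-size ratio (the '_norm' map),
        # presumably to renormalise the intensities after the pixel change.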
fitsFile = self.toFits()
self.naxis1 = float(fitsFile.header['naxis1'])
self.naxis2 = float(fitsFile.header['naxis2'])
self.cdelt1 = float(fitsFile.header['CDELT1'])
self.cdelt2 = float(fitsFile.header['CDELT2'])
self.crval1 = float(fitsFile.header['CRVAL1'])
self.crval2 = float(fitsFile.header['CRVAL2'])
self.crpix1 = float(fitsFile.header['CRPIX1'])
self.crpix2 = float(fitsFile.header['CRPIX2'])
print (self.naxis1, self.naxis2, self.cdelt1, self.cdelt2, self.crval1,
self.crval2, self.crpix1, self.crpix2)
self.cdelt1arcs = float(self.cdelt1) / (value * (1. / 60 / 60))
self.cdelt2arcs = float(self.cdelt1) / (value * (1. / 60 / 60))
self.naxis1New = self.naxis1 * math.sqrt(self.cdelt1arcs ** 2)
self.crpix1New = self.naxis1New / (self.naxis1 / self.crpix1)
self.naxis2New = self.naxis2 * math.sqrt(self.cdelt2arcs ** 2)
self.crpix2New = self.naxis2New / (self.naxis2 / self.crpix2)
self.newPix = float(value) / 60 / 60
print 'oldPixel', self.cdelt1 * 60 * 60, self.cdelt2 * 60 * 60
print 'axis1', self.naxis1, '->', self.naxis1New
print 'axis2', self.naxis2, '->', self.naxis2New
self.regrid(desc=str(self.crval1 * 2 * math.pi / 360) + ',' +
str(self.crpix1New) + ',' +
str(float(value) * -2 * math.pi / 360 / 60 / 60) + ',' +
str(self.naxis1New) + ',' +
str(self.crval2 * 2 * math.pi / 360) + ',' +
str(self.crpix2New) + ',' +
str(float(value) * 2 * math.pi / 360 / 60 / 60) + ',' +
str(self.naxis2New))
if JyB_KkmS == 'KkmS':
os.system('rm -rf ' + str(self.returnName()) + '_norm')
os.system('maths \'exp=<' + str(self.returnName()) + '>/' +
str(self.cdelt1arcs * self.cdelt2arcs) + '\'out=' +
str(self.returnName()) + '_norm')
self.comments += ['norm']
self.map_name = self.returnName()
|
{
"content_hash": "9241a95aece59db5b41fbfc3bfb51a73",
"timestamp": "",
"source": "github",
"line_count": 379,
"max_line_length": 79,
"avg_line_length": 36.54089709762533,
"alnum_prop": 0.47671312008087224,
"repo_name": "buchbend/astrolyze",
"id": "cea5c090f8231467e40c0bee23f2c433cd844aa1",
"size": "13919",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "astrolyze/maps/miriad.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "940056"
}
],
"symlink_target": ""
}
|
import numpy as np
from netcdf import netcdf as nc
import logging
from models.core import cuda, SourceModule
from cpu import CPUStrategy
import itertools
import math
with open('models/kernel.cu') as f:
mod_sourcecode = SourceModule(f.read())
def gpu_exec(func_name, results, *matrixs):
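    # Adapt all inputs to float32 ndarrays, copy them to the GPU, derive a
    # grid/block layout from the matrix shapes, launch kernel `func_name`,
    # and copy the first `results` arrays back into the given matrices.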
func = mod_sourcecode.get_function(func_name)
is_num = lambda x: isinstance(x, (int, long, float, complex))
adapt_matrix = lambda m: m if isinstance(m, np.ndarray) else m[:]
adapt = lambda x: np.array([[[x]]]) if is_num(x) else adapt_matrix(x)
matrixs_ram = map(lambda m: adapt(m).astype(np.float32,
casting='same_kind'),
matrixs)
shape_size = np.median(np.array(map(lambda m: len(m.shape), matrixs_ram)))
reshape = lambda m: m.reshape(m.shape[-int(shape_size):])
matrixs_ram = map(reshape, matrixs_ram)
matrixs_gpu = map(lambda m: cuda.mem_alloc(m.nbytes), matrixs_ram)
transferences = zip(matrixs_ram, matrixs_gpu)
list(map(lambda (m, m_gpu): cuda.memcpy_htod(m_gpu, m), transferences))
m_shapes = map(lambda m: list(m.shape), matrixs_ram)
for m_s in m_shapes:
while len(m_s) < 3:
m_s.insert(0, 1)
blocks = map(lambda ms: ms[1:3], m_shapes)
size = lambda m: m[0] * m[1]
max_blocks = max(map(size, blocks))
blocks = list(reversed(filter(lambda ms: size(ms) == max_blocks,
blocks)[0]))
threads = max(map(lambda ms: ms[0], m_shapes))
max_dims = getblockmaxdims(blocks[0], blocks[1], threads)
blocks[0] = blocks[0] / max_dims[0]
blocks[1] = blocks[1] / max_dims[1]
logging.info('-> grid dims: %s, block dims: %s, threads per block: %s\n' %
(str(blocks), str([max_dims[0], max_dims[1], threads]),
str(max_dims[0]*max_dims[1]*threads)))
func(*matrixs_gpu, grid=tuple(blocks),
block=tuple([max_dims[0], max_dims[1], threads]))
list(map(lambda (m, m_gpu): cuda.memcpy_dtoh(m, m_gpu),
transferences[:results]))
for i in range(results):
matrixs[i][:] = matrixs_ram[i]
matrixs_gpu[i].free()
return matrixs_ram[:results]
def getblockmaxdims(dimx, dimy, dimz):
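    # Among all divisor pairs of (dimx, dimy), pick the block shape with the
    # largest x*y footprint that keeps x * y * dimz within the 1024
    # threads-per-block limit.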
max_threads_per_block = 1024
squares = cartesianproduct(list(divisors(dimx)), list(divisors(dimy)))
validdims = filter(lambda x: x[0]*x[1]*dimz <= max_threads_per_block, squares)
return max(validdims, key=lambda x: x[0]*x[1])
def divisors(n):
large_divisors = []
for i in xrange(1, int(math.sqrt(n) + 1)):
        if n % i == 0:
yield i
            if i != n / i:
large_divisors.insert(0, n / i)
for divisor in large_divisors:
yield divisor
def cartesianproduct(x, y):
return np.dstack(np.meshgrid(x, y)).reshape(-1, 2)
class GPUStrategy(CPUStrategy):
def update_temporalcache(self, loader, cache):
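        # const() wraps scalar parameters as 1x1x1 arrays so gpu_exec can
        # treat them just like the gridded inputs.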
const = lambda c: np.array(c).reshape(1, 1, 1)
inputs = [loader.lat[0],
loader.lon[0],
self.decimalhour,
self.months,
self.gamma,
loader.dem,
loader.linke,
const(self.algorithm.SAT_LON),
const(self.algorithm.i0met),
const(1367.0),
const(8434.5)]
outputs = [self.declination,
self.solarangle,
self.solarelevation,
self.excentricity,
self.gc,
self.atmosphericalbedo,
self.t_sat,
self.t_earth,
self.cloudalbedo]
matrixs = list(itertools.chain(*[outputs, inputs]))
gpu_exec("update_temporalcache", len(outputs),
*matrixs)
nc.sync(cache)
"""
def estimate_globalradiation(self, loader, cache, output):
print "Estimate!"
const = lambda c: np.array(c).reshape(1, 1, 1)
inputs = [cache.slots,
cache.declination,
cache.solarangle,
cache.solarelevation,
cache.excentricity,
loader.lat[0],
loader.calibrated_data,
cache.gc,
cache.t_sat,
cache.t_earth,
cache.atmosphericalbedo,
cache.cloudalbedo,
const(self.algorithm.i0met),
const(self.algorithm.IMAGE_PER_HOUR)]
outputs = [output.ref_cloudindex,
output.ref_globalradiation]
matrixs = list(itertools.chain(*[outputs, inputs]))
gpu_exec("estimate_globalradiation", len(outputs),
*matrixs)
print "----"
maxmin = map(lambda o: (o[:].min(), o[:].max()), outputs)
for mm in zip(range(len(maxmin)), maxmin):
name = outputs[mm[0]].name if hasattr(outputs[mm[0]],
'name') else mm[0]
print name, ': ', mm[1]
print "----"
nc.sync(output.root)
super(GPUStrategy, self).estimate_globalradiation(loader, cache,
output)
"""
strategy = GPUStrategy
|
{
"content_hash": "32c45db9c0c1bff9e6835d2a73a2a240",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 82,
"avg_line_length": 38.04255319148936,
"alnum_prop": 0.5326249067859806,
"repo_name": "ahMarrone/solar_radiation_model",
"id": "61881e333bcd48d078e4f5400e82e7cea4e72d7f",
"size": "5364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/gpu.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cuda",
"bytes": "10880"
},
{
"name": "Makefile",
"bytes": "3635"
},
{
"name": "Python",
"bytes": "64354"
}
],
"symlink_target": ""
}
|
import tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Fisher'] , ['ConstantTrend'] , ['Seasonal_MonthOfYear'] , ['MLP'] );
|
{
"content_hash": "d4ee9a97473f99d1a42ece78de6b7048",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 91,
"avg_line_length": 41,
"alnum_prop": 0.7195121951219512,
"repo_name": "antoinecarme/pyaf",
"id": "b0034c6f214349217d3bfbaf591a1faca8f75e55",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/model_control/detailed/transf_Fisher/model_control_one_enabled_Fisher_ConstantTrend_Seasonal_MonthOfYear_MLP.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "6773299"
},
{
"name": "Procfile",
"bytes": "24"
},
{
"name": "Python",
"bytes": "54209093"
},
{
"name": "R",
"bytes": "807"
},
{
"name": "Shell",
"bytes": "3619"
}
],
"symlink_target": ""
}
|
from precog import *
from pytest import config
from base64 import urlsafe_b64encode
import json
import sys
import time  # json/time are used below; import explicitly rather than relying on the star import
def setup_module(m):
ROOTKEY = config.getvalue('apiKey')
HOST = config.getvalue('host')
PORT = config.getvalue('port')
m.TestEverything.root = Precog(ROOTKEY, None, None, host=HOST, port=PORT)
accountid = m.TestEverything.root.search_account("test-py@precog.com")[0]['accountId']
apikey = m.TestEverything.root.account_details("test-py@precog.com", "password", accountid)['apiKey']
m.TestEverything.api = Precog(apikey, accountid, accountid, host=HOST, port=PORT)
m.TestEverything.accountid = accountid
m.TestEverything.apikey = apikey
print 'ok ok', m.TestEverything.api
def teardown_module(m):
pass
class TestEverything:
def queryUntil(self, bp, q, expected, timeout=30):
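        # Poll the query every 0.5s until it returns `expected` or `timeout`
        # seconds pass (ingestion appears to be asynchronous).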
t0 = time.time()
res = None
while time.time() - t0 < timeout:
print " trying..."
res = self.api.query(q, bp)
if res == expected:
return
time.sleep(0.5)
assert res == expected, res
def test_csv(self):
# csv
csvdata = "foo,bar,qux\n1,2,3\n4,5,6\n"
response = self.api.append_all_from_string('foo', Format.csv, csvdata)
assert response.get('errors') == [], response
assert response.get('ingested') == 2, response
print 'append_all_from_string(csv): ok'
def test_json(self):
# json
jsondata = {"a": "foo", "b": {"nested": True}, "c": [1,2,3], "d": 4}
response = self.api.append_all("foo", jsondata)
assert response.get('errors') == [], response
assert response.get('ingested') == 1, response
print 'append_all: ok'
def test_append(self):
response = self.api.append("bar", [1,2,3,4])
assert response.get('ingested') == 1, response
print 'append: ok'
def test_query1(self):
response = self.api.query("count(//nonexistent)", "qux")
assert response == [0], response
print 'empty count: ok'
def test_query2(self):
response = self.api.query("count(//nonexistent)", "qux", detailed=True)
assert response['serverErrors'] == [], response
assert response['errors'] == [], response
assert response['data'] == [0], response
print 'detailed empty count: ok'
def test_populate1(self):
self.api.delete("qux/test")
self.queryUntil("qux", "count(//test)", [0])
print "delete qux/test: ok"
objs = []
for i in range(0, 100): objs.append({"i": i, "j": i % 13, "k": "foo"})
response = self.api.append_all("qux/test", objs)
assert response['ingested'] == 100, response
print "populate qux/test: ok"
self.queryUntil("qux", "count(//test)", [100])
print "count qux/test: ok"
def test_upload(self):
newer = []
for i in range(0, 60):
newer.append({"iii": i, "newer": True})
s = json.dumps(newer)
response = self.api.upload_string("qux/test", Format.json, s)
assert response['ingested'] == 60, response
print "upload new qux/test: ok"
self.queryUntil("qux", "count(//test)", [60])
print "count qux/test again: ok"
|
{
"content_hash": "d6630dce3ef5898d7633152b7b9e2c99",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 105,
"avg_line_length": 34.91489361702128,
"alnum_prop": 0.5865326020719074,
"repo_name": "precog/precog_python_client",
"id": "e87fa2bb0a056bd5c4f3a3ef73f2e1d95933a0fb",
"size": "3408",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "precog/test/test_everything.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31480"
},
{
"name": "Shell",
"bytes": "6710"
}
],
"symlink_target": ""
}
|
import pandas as pa
class Benchmark:
def __init__(self, name, unit, less_is_better, values, stats=None):
self.name = name
self.unit = unit
self.less_is_better = less_is_better
self.values = pa.Series(values)
self.statistics = self.values.describe()
@property
def value(self):
median = "50%"
return float(self.statistics[median])
def __repr__(self):
return f"Benchmark[name={self.name},value={self.value}]"
class BenchmarkSuite:
def __init__(self, name, benchmarks):
self.name = name
self.benchmarks = benchmarks
def __repr__(self):
name = self.name
benchmarks = self.benchmarks
return f"BenchmarkSuite[name={name}, benchmarks={benchmarks}]"
|
{
"content_hash": "7b216f1848e6135b117b1aa3432818c9",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 71,
"avg_line_length": 26.75862068965517,
"alnum_prop": 0.6082474226804123,
"repo_name": "majetideepak/arrow",
"id": "83bc2735dd91841686398815d1a42e6885a34432",
"size": "1562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/archery/archery/benchmark/core.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "73655"
},
{
"name": "Awk",
"bytes": "3683"
},
{
"name": "Batchfile",
"bytes": "32714"
},
{
"name": "C",
"bytes": "334766"
},
{
"name": "C#",
"bytes": "505406"
},
{
"name": "C++",
"bytes": "8830397"
},
{
"name": "CMake",
"bytes": "443673"
},
{
"name": "CSS",
"bytes": "3946"
},
{
"name": "Dockerfile",
"bytes": "51066"
},
{
"name": "Emacs Lisp",
"bytes": "931"
},
{
"name": "FreeMarker",
"bytes": "2271"
},
{
"name": "Go",
"bytes": "835735"
},
{
"name": "HTML",
"bytes": "22930"
},
{
"name": "Java",
"bytes": "2941380"
},
{
"name": "JavaScript",
"bytes": "99135"
},
{
"name": "Lua",
"bytes": "8771"
},
{
"name": "M4",
"bytes": "8712"
},
{
"name": "MATLAB",
"bytes": "36600"
},
{
"name": "Makefile",
"bytes": "49294"
},
{
"name": "Meson",
"bytes": "37613"
},
{
"name": "Objective-C",
"bytes": "11580"
},
{
"name": "PLpgSQL",
"bytes": "56995"
},
{
"name": "Perl",
"bytes": "3799"
},
{
"name": "Python",
"bytes": "1885355"
},
{
"name": "R",
"bytes": "214313"
},
{
"name": "Ruby",
"bytes": "729461"
},
{
"name": "Rust",
"bytes": "2011342"
},
{
"name": "Shell",
"bytes": "358704"
},
{
"name": "TSQL",
"bytes": "29787"
},
{
"name": "Thrift",
"bytes": "138360"
},
{
"name": "TypeScript",
"bytes": "1125277"
}
],
"symlink_target": ""
}
|
import os
import sys
settings_module = os.environ.get('SDCURSES_SETTINGS')
if settings_module:
import importlib
settings = importlib.import_module(settings_module)
else:
from . import defaults as settings
this_module = sys.modules[__name__]
for (key, value) in settings.__dict__.iteritems():
setattr(this_module, key, value)
del this_module
if os.environ.get('SDCURSES_KEY_ID'):
KEY_ID = os.environ['SDCURSES_KEY_ID']
if os.environ.get('SDCURSES_LOCATION'):
LOCATIONS = [os.environ['SDCURSES_LOCATION']]
|
{
"content_hash": "704ef86764b246fb74172079a9e139f4",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 55,
"avg_line_length": 25.523809523809526,
"alnum_prop": 0.707089552238806,
"repo_name": "glassresistor/sdcurses",
"id": "729ba6ed506fc0ee4a5270ba9790a6e2c688850a",
"size": "536",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sdcurses/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "4619"
}
],
"symlink_target": ""
}
|
from gitlabform.gitlab.core import NotFoundException, InvalidParametersException
from gitlabform.gitlab.groups import GitLabGroups
class GitLabGroupLDAPLinks(GitLabGroups):
def get_ldap_group_links(self, group):
group_id = self.get_group_id_case_insensitive(group)
return self._make_requests_to_api(
"groups/%s/ldap_group_links", group_id, expected_codes=[200, 404]
)
def add_ldap_group_link(self, group, data):
group_id = self.get_group_id_case_insensitive(group)
data["id"] = group_id
try:
return self._make_requests_to_api(
"groups/%s/ldap_group_links",
group_id,
method="POST",
data=data,
expected_codes=[200, 201],
)
# this is a GitLab API bug - it returns 404 here instead of 400 for bad requests...
except NotFoundException:
raise InvalidParametersException(
f"Invalid parameters for a Group LDAP link for group {group}: {data}"
)
def delete_ldap_group_link(self, group, data):
if "group_access" in data:
del data["group_access"]
group_id = self.get_group_id_case_insensitive(group)
data["id"] = group_id
# 404 means that the LDAP group link is already removed, so let's accept it for idempotency
self._make_requests_to_api(
"groups/%s/ldap_group_links",
group_id,
method="DELETE",
data=data,
expected_codes=[204, 404],
)
|
{
"content_hash": "57a5a0701ae2d305591680fd5a387a9c",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 99,
"avg_line_length": 36.09090909090909,
"alnum_prop": 0.5869017632241813,
"repo_name": "egnyte/gitlabform",
"id": "aec0f824fd0a9a72598317db03a03deb81d15dae",
"size": "1588",
"binary": false,
"copies": "1",
"ref": "refs/heads/dependabot/pip/coverage-6.2",
"path": "gitlabform/gitlab/group_ldap_links.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "591"
},
{
"name": "Python",
"bytes": "137279"
},
{
"name": "Shell",
"bytes": "3171"
}
],
"symlink_target": ""
}
|
import optparse
import os
import sys
from telemetry.core import util
from telemetry.results import buildbot_output_formatter
from telemetry.results import chart_json_output_formatter
from telemetry.results import csv_output_formatter
from telemetry.results import csv_pivot_table_output_formatter
from telemetry.results import gtest_progress_reporter
from telemetry.results import html_output_formatter
from telemetry.results import json_output_formatter
from telemetry.results import page_test_results
from telemetry.results import progress_reporter
# Allowed output formats. The default is the first item in the list.
_OUTPUT_FORMAT_CHOICES = ('html', 'buildbot', 'csv', 'gtest', 'json',
'chartjson', 'csv-pivot-table', 'none')
# Filenames to use for given output formats.
_OUTPUT_FILENAME_LOOKUP = {
'html': 'results.html',
'csv': 'results.csv',
'json': 'results.json',
'chartjson': 'results-chart.json',
'csv-pivot-table': 'results-pivot-table.csv'
}
def AddResultsOptions(parser):
group = optparse.OptionGroup(parser, 'Results options')
group.add_option('--chartjson', action='store_true',
help='Output Chart JSON. Ignores --output-format.')
group.add_option('--output-format', action='append', dest='output_formats',
choices=_OUTPUT_FORMAT_CHOICES, default=[],
help='Output format. Defaults to "%%default". '
'Can be %s.' % ', '.join(_OUTPUT_FORMAT_CHOICES))
group.add_option('-o', '--output',
dest='output_file',
default=None,
help='Redirects output to a file. Defaults to stdout.')
group.add_option('--output-dir', default=util.GetBaseDir(),
help='Where to save output data after the run.')
group.add_option('--output-trace-tag',
default='',
help='Append a tag to the key of each result trace. Use '
'with html, buildbot, csv-pivot-table output formats.')
group.add_option('--reset-results', action='store_true',
help='Delete all stored results.')
group.add_option('--upload-results', action='store_true',
help='Upload the results to cloud storage.')
group.add_option('--upload-bucket', default='internal',
choices=['public', 'partner', 'internal'],
help='Storage bucket to use for the uploaded results. '
'Defaults to internal. Supported values are: '
'public, partner, internal')
group.add_option('--results-label',
default=None,
                   help='Optional label to use for the results of a run.')
group.add_option('--suppress_gtest_report',
default=False,
help='Whether to suppress GTest progress report.')
parser.add_option_group(group)
def ProcessCommandLineArgs(parser, args):
  # TODO(ariblue): Delete this flag entirely at some future date, when the
# existence of such a flag has been long forgotten.
if args.output_file:
parser.error('This flag is deprecated. Please use --output-dir instead.')
try:
os.makedirs(args.output_dir)
except OSError:
# Do nothing if the output directory already exists. Existing files will
# get overwritten.
pass
args.output_dir = os.path.expanduser(args.output_dir)
def _GetOutputStream(output_format, output_dir):
assert output_format in _OUTPUT_FORMAT_CHOICES, 'Must specify a valid format.'
assert output_format not in ('gtest', 'none'), (
'Cannot set stream for \'gtest\' or \'none\' output formats.')
if output_format == 'buildbot':
return sys.stdout
assert output_format in _OUTPUT_FILENAME_LOOKUP, (
'No known filename for the \'%s\' output format' % output_format)
output_file = os.path.join(output_dir, _OUTPUT_FILENAME_LOOKUP[output_format])
open(output_file, 'a').close() # Create file if it doesn't exist.
return open(output_file, 'r+')
def _GetProgressReporter(output_skipped_tests_summary, suppress_gtest_report):
if suppress_gtest_report:
return progress_reporter.ProgressReporter()
return gtest_progress_reporter.GTestProgressReporter(
sys.stdout, output_skipped_tests_summary=output_skipped_tests_summary)
def CreateResults(benchmark_metadata, options,
value_can_be_added_predicate=lambda v, is_first: True):
"""
Args:
options: Contains the options specified in AddResultsOptions.
"""
if not options.output_formats:
options.output_formats = [_OUTPUT_FORMAT_CHOICES[0]]
output_formatters = []
for output_format in options.output_formats:
if output_format == 'none' or output_format == "gtest" or options.chartjson:
continue
output_stream = _GetOutputStream(output_format, options.output_dir)
if output_format == 'csv':
output_formatters.append(csv_output_formatter.CsvOutputFormatter(
output_stream))
elif output_format == 'csv-pivot-table':
output_formatters.append(
csv_pivot_table_output_formatter.CsvPivotTableOutputFormatter(
output_stream, trace_tag=options.output_trace_tag))
elif output_format == 'buildbot':
output_formatters.append(
buildbot_output_formatter.BuildbotOutputFormatter(
output_stream, trace_tag=options.output_trace_tag))
elif output_format == 'html':
# TODO(chrishenry): We show buildbot output so that users can grep
# through the results easily without needing to open the html
# file. Another option for this is to output the results directly
# in gtest-style results (via some sort of progress reporter),
# as we plan to enable gtest-style output for all output formatters.
output_formatters.append(
buildbot_output_formatter.BuildbotOutputFormatter(
sys.stdout, trace_tag=options.output_trace_tag))
output_formatters.append(html_output_formatter.HtmlOutputFormatter(
output_stream, benchmark_metadata, options.reset_results,
options.upload_results, options.browser_type,
options.results_label, trace_tag=options.output_trace_tag))
elif output_format == 'json':
output_formatters.append(json_output_formatter.JsonOutputFormatter(
output_stream, benchmark_metadata))
elif output_format == 'chartjson':
output_formatters.append(
chart_json_output_formatter.ChartJsonOutputFormatter(
output_stream, benchmark_metadata))
else:
# Should never be reached. The parser enforces the choices.
raise Exception('Invalid --output-format "%s". Valid choices are: %s'
% (output_format, ', '.join(_OUTPUT_FORMAT_CHOICES)))
# TODO(chrishenry): This is here to not change the output of
# gtest. Let's try enabling skipped tests summary for gtest test
# results too (in a separate patch), and see if we break anything.
output_skipped_tests_summary = 'gtest' in options.output_formats
reporter = _GetProgressReporter(output_skipped_tests_summary,
options.suppress_gtest_report)
return page_test_results.PageTestResults(
output_formatters=output_formatters, progress_reporter=reporter,
output_dir=options.output_dir,
value_can_be_added_predicate=value_can_be_added_predicate)
|
{
"content_hash": "86d10c9a10360814f3b66c5ba05506a8",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 80,
"avg_line_length": 44.293413173652695,
"alnum_prop": 0.6736514803298634,
"repo_name": "sgraham/nope",
"id": "5b70ad726667ac0d69c17c83400a7fc67d9ea36b",
"size": "7560",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tools/telemetry/telemetry/results/results_options.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "6973"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "39967"
},
{
"name": "C",
"bytes": "4061434"
},
{
"name": "C++",
"bytes": "279546186"
},
{
"name": "CMake",
"bytes": "27212"
},
{
"name": "CSS",
"bytes": "919339"
},
{
"name": "Emacs Lisp",
"bytes": "988"
},
{
"name": "Go",
"bytes": "13628"
},
{
"name": "Groff",
"bytes": "5283"
},
{
"name": "HTML",
"bytes": "15989749"
},
{
"name": "Java",
"bytes": "7541683"
},
{
"name": "JavaScript",
"bytes": "32372588"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "Makefile",
"bytes": "40513"
},
{
"name": "Objective-C",
"bytes": "1584184"
},
{
"name": "Objective-C++",
"bytes": "8249988"
},
{
"name": "PHP",
"bytes": "97817"
},
{
"name": "PLpgSQL",
"bytes": "169060"
},
{
"name": "Perl",
"bytes": "63937"
},
{
"name": "Protocol Buffer",
"bytes": "427339"
},
{
"name": "Python",
"bytes": "8346306"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "844553"
},
{
"name": "Standard ML",
"bytes": "4965"
},
{
"name": "VimL",
"bytes": "4075"
},
{
"name": "XSLT",
"bytes": "418"
},
{
"name": "nesC",
"bytes": "18347"
}
],
"symlink_target": ""
}
|
import os
import webapp2 as webapp
from google.appengine.api import users
from google.appengine.api import images
from google.appengine.api import memcache
from google.appengine.ext import blobstore
import datetime
import json
import handlers
import model.blog
import model.doc
import ctrl.blog
class AdminPage(handlers.BaseHandler):
def dispatch(self):
'''Checks that the user is logged in and such before we process the request.'''
self.user = users.get_current_user()
if not self.user:
# not logged in, so redirect to the login page
self.redirect(users.create_login_url(self.request.uri))
return
# TODO: better handling of authorization...
if self.user.email() != 'dean@codeka.com.au':
# not authorized to view the backend, redirect to the home page instead
self.redirect('/')
return
super(AdminPage, self).dispatch()
class AdminDashboardPage(AdminPage):
def get(self):
self.render('admin/dashboard.html', {})
class AdminPostListPage(AdminPage):
def get(self):
pageNo = 0
if self.request.get('page'):
pageNo = int(self.request.get('page'))
posts = ctrl.blog.getPosts(pageNo, 100, True)
data = {'posts': posts,
'pageNo': pageNo}
self.render('admin/posts_list.html', data)
class AdminPostsPage(AdminPage):
def get(self, postID = None):
data = {}
if postID == 'new':
self.render('admin/posts_new.html', data)
else:
post = ctrl.blog.getPost(int(postID))
data['post'] = post
self.render('admin/posts_edit.html', data)
def post(self, postID):
if postID == 'new':
post = model.blog.Post()
else:
post = ctrl.blog.getPost(int(postID))
post.html = self.request.POST.get('post-content')
post.title = self.request.POST.get('post-title')
if self.request.POST.get('post-date'):
post.posted = datetime.datetime.strptime(self.request.POST.get('post-date'), '%y-%m-%d %H:%M')
post.isPublished = bool(self.request.POST.get('post-ispublished'))
post.tags = []
for tag in self.request.POST.get('post-tags').split(','):
if tag.strip() == '':
continue
post.tags.append(tag.strip())
post.blobs = []
if self.request.POST.get('post-blobs'):
for blobKey in json.loads(self.request.POST.get('post-blobs')):
post.blobs.append(blobKey)
ctrl.blog.savePost(post)
if self.request.POST.get('action') == 'Save & View':
self.redirect('/blog/%04d/%02d/%s' % (post.posted.year, post.posted.month, post.slug))
else:
self.redirect('/admin/posts/%d' % (post.key().id()))
class AdminPostDeletePage(AdminPage):
def get(self, postID):
data = {'post': ctrl.blog.getPost(int(postID))}
self.render('admin/post_delete.html', data)
def post(self, postID):
ctrl.blog.deletePost(int(postID))
self.redirect('/admin/posts')
class AdminBlobsPage(AdminPage):
def get(self):
query = blobstore.BlobInfo.all().order("-creation")
if self.request.get("cursor"):
query.with_cursor(self.request.get("cursor"))
blobs = []
for blob in query:
blobs.append(blob)
if len(blobs) > 20:
break
data = {"blobs": blobs}
cursor = query.cursor()
for blob in query:
# only add the cursor if there's at least one more...
data["cursor"] = cursor
break
self.render('admin/blobs/index.html', data)
class AdminBlobsNewPage(AdminPage):
def get(self):
self.render('admin/blobs/new.html', {})
app = webapp.WSGIApplication([('/admin', AdminDashboardPage),
('/admin/posts', AdminPostListPage),
('/admin/posts/([0-9]+|new)', AdminPostsPage),
('/admin/posts/([0-9]+)/delete', AdminPostDeletePage),
('/admin/blobs', AdminBlobsPage),
('/admin/blobs/new', AdminBlobsNewPage)],
debug=os.environ['SERVER_SOFTWARE'].startswith('Development'))
|
{
"content_hash": "30d6434500db0c1079d756d730c81f53",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 100,
"avg_line_length": 29.43065693430657,
"alnum_prop": 0.6264880952380952,
"repo_name": "jife94/wwmmo",
"id": "eeca573042387326b73261fdba7f47f790aab8e8",
"size": "4033",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "website/handlers/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "141199"
},
{
"name": "HTML",
"bytes": "193014"
},
{
"name": "Java",
"bytes": "2372428"
},
{
"name": "JavaScript",
"bytes": "108104"
},
{
"name": "Protocol Buffer",
"bytes": "41792"
},
{
"name": "Python",
"bytes": "435100"
},
{
"name": "Shell",
"bytes": "39165"
}
],
"symlink_target": ""
}
|
import time
import collections
import grpc
from . import fl_logging as logging
class LocalServicerContext(grpc.ServicerContext):
def invocation_metadata(self):
return ()
def peer(self):
return "local"
def peer_identities(self):
return None
def peer_identity_key(self):
return None
def auth_context(self):
return dict()
def set_compression(self, compression):
return grpc.Compression.NoCompression
def send_initial_metadata(self, initial_metadata):
pass
def set_trailing_metadata(self, trailing_metadata):
pass
def abort(self, code, details):
pass
def abort_with_status(self, status):
pass
def set_code(self, code):
pass
def set_details(self, details):
pass
def disable_next_message_compression(self):
pass
def is_active(self):
return True
def time_remaining(self):
return None
def cancel(self):
pass
def add_callback(self, callback):
pass
def call_with_retry(call, max_retry_times=None, retry_interval=1):
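    # Invoke `call` (a zero-argument callable); on grpc.RpcError wait
    # `retry_interval` seconds and try again, giving up after
    # `max_retry_times` attempts (or retrying forever when it is None).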
retry_times = 0
while True:
try:
retry_times += 1
return call()
except grpc.RpcError as e:
if max_retry_times is None or retry_times < max_retry_times:
logging.warning(
"grpc call error, status: %s"
", details: %s, wait %ds for retry", e.code(), e.details(),
retry_interval)
time.sleep(retry_interval)
else:
raise e
#def remote_insecure_channel(address, options=None, compression=None):
# EGRESS_URL = os.getenv('EGRESS_URL', None)
# EGRESS_HOST = os.environ.get('EGRESS_HOST', None)
# EGRESS_DOMAIN = os.environ.get('EGRESS_DOMAIN', None)
# if not EGRESS_URL:
# return grpc.insecure_channel(address, options, compression)
#
# options = list(options) if options else list()
# default_authority = EGRESS_HOST or address
# options.append(('grpc.default_authority', default_authority))
# channel = grpc.insecure_channel(EGRESS_URL, options, compression)
#
# if EGRESS_DOMAIN:
# address = address + '.' + EGRESS_DOMAIN
# channel = grpc.intercept_channel(
# channel, add_metadata_interceptor({'x-host': address}))
#
# return channel
#
#
#def add_metadata_interceptor(headers):
# if not isinstance(headers, dict):
# raise TypeError("headers must be a dict")
# headers = list(headers.items())
#
# def add_metadata_fn(client_call_details, request_iterator,
# request_streaming, response_streaming):
# metadata = list(client_call_details.metadata or [])
# metadata.extend(headers)
# client_call_details = _ClientCallDetails(
# client_call_details.method, client_call_details.timeout, metadata,
# client_call_details.credentials)
# return client_call_details, request_iterator, None
#
# return _GenericClientInterceptor(add_metadata_fn)
class _ClientCallDetails(
collections.namedtuple(
'_ClientCallDetails',
('method', 'timeout', 'metadata', 'credentials')),
grpc.ClientCallDetails):
pass
class _GenericClientInterceptor(grpc.UnaryUnaryClientInterceptor,
grpc.UnaryStreamClientInterceptor,
grpc.StreamUnaryClientInterceptor,
grpc.StreamStreamClientInterceptor):
def __init__(self, interceptor_function):
self._fn = interceptor_function
def intercept_unary_unary(self, continuation, client_call_details,
request):
new_details, new_request_iterator, postprocess = self._fn(
client_call_details, iter((request, )), False, False)
response = continuation(new_details, next(new_request_iterator))
return postprocess(response) if postprocess else response
def intercept_unary_stream(self, continuation, client_call_details,
request):
new_details, new_request_iterator, postprocess = self._fn(
client_call_details, iter((request, )), False, True)
response_it = continuation(new_details, next(new_request_iterator))
return postprocess(response_it) if postprocess else response_it
def intercept_stream_unary(self, continuation, client_call_details,
request_iterator):
new_details, new_request_iterator, postprocess = self._fn(
client_call_details, request_iterator, True, False)
response = continuation(new_details, new_request_iterator)
return postprocess(response) if postprocess else response
def intercept_stream_stream(self, continuation, client_call_details,
request_iterator):
new_details, new_request_iterator, postprocess = self._fn(
client_call_details, request_iterator, True, True)
response_it = continuation(new_details, new_request_iterator)
return postprocess(response_it) if postprocess else response_it
|
{
"content_hash": "ed44905b94e2e4e0bde801ad44032c44",
"timestamp": "",
"source": "github",
"line_count": 157,
"max_line_length": 79,
"avg_line_length": 33.203821656050955,
"alnum_prop": 0.6249760214847496,
"repo_name": "bytedance/fedlearner",
"id": "c32d511d76a75f337edd7a6b11f184a4a91c604f",
"size": "5213",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fedlearner/common/grpc_utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "25817"
},
{
"name": "CSS",
"bytes": "7766"
},
{
"name": "Dockerfile",
"bytes": "6341"
},
{
"name": "Go",
"bytes": "163506"
},
{
"name": "HTML",
"bytes": "3527"
},
{
"name": "JavaScript",
"bytes": "482972"
},
{
"name": "Less",
"bytes": "14981"
},
{
"name": "Lua",
"bytes": "8088"
},
{
"name": "Makefile",
"bytes": "2869"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Mustache",
"bytes": "35891"
},
{
"name": "Python",
"bytes": "2412335"
},
{
"name": "Shell",
"bytes": "118210"
},
{
"name": "TypeScript",
"bytes": "805827"
}
],
"symlink_target": ""
}
|
from Tkinter import *
import Pmw
class YasaraControl(Frame):
"""A yasara controller for comparison use"""
def __init__(self, parent, yasara):
Frame.__init__(self, parent, height=200,width=160)
self.yasara = yasara
c=Button(self,text='Align', command=self.align)
c.grid(row=1,column=0,sticky='news',padx=2,pady=2)
'''c = Pmw.Counter(parent,
labelpos = 'w',
label_text = 'residue:',
entryfield_value = 0,
entryfield_command = self.selectRes,
entryfield_validate = {'validator' : 'integer',
'min' : 0, 'max' : 1000})
c.grid(row=2,column=0,columnspan=2,padx=2,pady=2)'''
return
def align(self):
"""try to align objects"""
Y = self.yasara
Y.AlignMultiAll()
return
def selectRes(self):
"""Allow highlight residue from list"""
return
|
{
"content_hash": "d954fb106a6fd7fd46ba88a477049169",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 66,
"avg_line_length": 32,
"alnum_prop": 0.50390625,
"repo_name": "dmnfarrell/peat",
"id": "327eacc11ed91ba1b97427f18fc672b9966c6ebd",
"size": "1231",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PEATDB/Yasara.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "243"
},
{
"name": "C",
"bytes": "744763"
},
{
"name": "C++",
"bytes": "999138"
},
{
"name": "CSS",
"bytes": "10879"
},
{
"name": "Gnuplot",
"bytes": "311"
},
{
"name": "JavaScript",
"bytes": "60380"
},
{
"name": "Makefile",
"bytes": "12428"
},
{
"name": "Mathematica",
"bytes": "964"
},
{
"name": "Matlab",
"bytes": "820"
},
{
"name": "Mercury",
"bytes": "26238794"
},
{
"name": "PHP",
"bytes": "92905"
},
{
"name": "Python",
"bytes": "5466696"
},
{
"name": "Shell",
"bytes": "2984"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
import aaf
import aaf.mob
import aaf.define
import aaf.iterator
import aaf.dictionary
import aaf.storage
import aaf.component
import unittest
import os
cur_dir = os.path.dirname(os.path.abspath(__file__))
sandbox = os.path.join(cur_dir,'sandbox')
if not os.path.exists(sandbox):
os.makedirs(sandbox)
class TestTypeDefSet(unittest.TestCase):
def test_typedef_set(self):
f = aaf.open()
marker = f.create.DescriptiveMarker()
elements = [1,2,3]
marker.set_described_slot_ids([1,2,3])
self.assertTrue(elements == list(marker['DescribedSlots'].value))
self.assertTrue(marker['DescribedSlots'].typedef.size(
marker['DescribedSlots'].property_value()) == len(elements))
if __name__ == "__main__":
unittest.main()
|
{
"content_hash": "a3690eb81b54247dd15f88e9190f29f7",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 80,
"avg_line_length": 27.6,
"alnum_prop": 0.6751207729468599,
"repo_name": "markreidvfx/pyaaf",
"id": "6a9d87caba4a349602cafb977cd1939288a0f977",
"size": "828",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_TypeDefSet.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "29273"
},
{
"name": "Makefile",
"bytes": "738"
},
{
"name": "Python",
"bytes": "583989"
},
{
"name": "Shell",
"bytes": "487"
}
],
"symlink_target": ""
}
|
import time
import telnetlib
import logging
from threading import Lock
from .defines import *
__version__ = "0.1"
__license__ = "BSD 3-Clause"
__copyright__ = "Copyright 2011, Andrew Williams"
__author__ = "Andrew Williams, Krzysztof Jagiello"
class ConnectionError(Exception):
def __init__(self, ip, port):
self.ip = ip
self.port = port
    def __str__(self):
        return 'Error connecting to host %s port %s.' % (self.ip, self.port,)
class NoConnectionError(Exception):
    def __str__(self):
        return 'No connection established.'
class InvalidArguments(ValueError):
"""
    Raised when an abstracted function has received invalid arguments
"""
ts3_escape = [
(chr(92), '\\'), # \
(chr(47), "\/"), # /
(chr(32), '\s'), # Space
(chr(124), '\p'), # |
(chr(7), '\a'), # Bell
(chr(8), '\b'), # Backspace
(chr(12), '\f'), # Formfeed
(chr(10), '\n'), # Newline
    (chr(13), '\r'), # Carriage Return
(chr(9), '\t'), # Horizontal Tab
(chr(11), '\v'), # Vertical tab
]
ts3_escape_byte = [
(str.encode(chr(92)), b'\\'), # \
(str.encode(chr(47)), b"\/"), # /
(str.encode(chr(32)), b'\s'), # Space
(str.encode(chr(124)), b'\p'), # |
(str.encode(chr(7)), b'\a'), # Bell
(str.encode(chr(8)), b'\b'), # Backspace
(str.encode(chr(12)), b'\f'), # Formfeed
(str.encode(chr(10)), b'\n'), # Newline
    (str.encode(chr(13)), b'\r'), # Carriage Return
(str.encode(chr(9)), b'\t'), # Horizontal Tab
(str.encode(chr(11)), b'\v'), # Vertical tab
]
class TS3Response():
def __init__(self, response, data):
self.response = TS3Proto.parse_response(response)
self.data = TS3Proto.parse_data(data)
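        # parse_data() returns a single dict for one-row responses; normalise
        # to a list of dicts (an empty list when there is no payload).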
if isinstance(self.data, dict):
if self.data:
self.data = [self.data]
else:
self.data = []
@property
def is_successful(self):
return self.response['msg'] == 'ok'
class TS3Proto():
io_lock = Lock()
@property
def logger(self):
if not hasattr(self, "_logger"):
self._logger = logging.getLogger(__name__)
return self._logger
def connect(self, ip, port=10011, timeout=5):
self.io_lock.acquire()
try:
self._telnet = telnetlib.Telnet(ip, port)
except telnetlib.socket.error:
raise ConnectionError(ip, port)
self._timeout = timeout
self._connected = False
data = self._telnet.read_until(b"\n\r", self._timeout)
self.io_lock.release()
if data.endswith(b"TS3\n\r"):
self._connected = True
return self._connected
def disconnect(self):
self.check_connection()
self.send_command("quit")
self._telnet.close()
self._connected = False
def send_command(self, command, keys=None, opts=None):
self.check_connection()
commandstr = self.construct_command(command, keys=keys, opts=opts)
self.logger.debug("send_command - %s" % commandstr)
self.io_lock.acquire()
self._telnet.write(str.encode("%s\n\r" % commandstr))
data = ""
response = self._telnet.read_until(b"\n\r", self._timeout)
self.io_lock.release()
if not response.startswith(b"error"):
# what we just got was extra data
data = response
response = self._telnet.read_until(b"\n\r", self._timeout)
if isinstance(data, bytes):
return TS3Response(response, data.decode("utf-8"))
return TS3Response(response, data)
def check_connection(self):
        if not self.is_connected():
raise NoConnectionError
def is_connected(self):
return self._connected
def construct_command(self, command, keys=None, opts=None):
"""
Constructs a TS3 formatted command string
Keys can have a single nested list to construct a nested parameter
@param command: Command list
@type command: string
@param keys: Key/Value pairs
@type keys: dict
@param opts: Options
@type opts: list
"""
cstr = [command]
# Add the keys and values, escape as needed
if keys:
for key in keys:
if isinstance(keys[key], list):
ncstr = []
for nest in keys[key]:
ncstr.append("%s=%s" % (key, self._escape_str(nest)))
cstr.append("|".join(ncstr))
else:
cstr.append("%s=%s" % (key, self._escape_str(keys[key])))
# Add in options
if opts:
for opt in opts:
cstr.append("-%s" % opt)
return " ".join(cstr)
@staticmethod
def parse_response(response):
"""
Parses a TS3 command string into command/keys/opts tuple
@param command: Command string
@type command: string
"""
        # responses always begin with "error " so we may just skip it
        return TS3Proto.parse_data(response[6:])
@staticmethod
def parse_data(data):
"""
Parses data string consisting of key=value
@param data: data string
@type data: string
"""
if isinstance(data, bytes):
data = data.decode("utf-8")
data = data.strip()
multipart = data.split('|')
if len(multipart) > 1:
values = []
for part in multipart:
values.append(TS3Proto.parse_data(part))
return values
chunks = data.split(' ')
parsed_data = {}
for chunk in chunks:
chunk = chunk.strip().split('=')
if len(chunk) > 1:
if len(chunk) > 2:
# value can contain '=' which may confuse our parser
chunk = [chunk[0], '='.join(chunk[1:])]
key, value = chunk
parsed_data[key] = TS3Proto._unescape_str(value)
else:
# TS3 Query Server may sometimes return a key without any value
# and we default its value to None
parsed_data[chunk[0]] = None
return parsed_data
@staticmethod
def _escape_str(value):
"""
Escape a value into a TS3 compatible string
@param value: Value
@type value: string/int
"""
if isinstance(value, int):
return str(value)
for i, j in ts3_escape:
value = value.replace(i, j)
return value
@staticmethod
def _unescape_str(value):
"""
Unescape a TS3 compatible string into a normal string
@param value: Value
@type value: string/int
"""
if isinstance(value, int):
return str(value)
for i, j in ts3_escape:
value = value.replace(j, i)
return value
class TS3Server(TS3Proto):
def __init__(self, ip=None, port=10011, id=0):
"""
Abstraction class for TS3 Servers
@param ip: IP Address
@type ip: str
@param port: Port Number
@type port: int
"""
if ip and port:
if self.connect(ip, port) and id > 0:
self.use(id)
@property
def logger(self):
if not hasattr(self, "_logger"):
self._logger = logging.getLogger(__name__)
return self._logger
def login(self, username, password):
"""
Login to the TS3 Server
@param username: Username
@type username: str
@param password: Password
@type password: str
"""
response = self.send_command('login', keys={'client_login_name': username, 'client_login_password': password })
return response.is_successful
def serverlist(self):
"""
Get a list of all Virtual Servers on the connected TS3 instance
"""
return self.send_command('serverlist')
def gm(self, msg):
"""
Send a global message to the current Virtual Server
@param msg: Message
        @type msg: str
"""
response = self.send_command('gm', keys={'msg': msg})
return response.is_successful
def use(self, id):
"""
Use a particular Virtual Server instance
@param id: Virtual Server ID
@type id: int
"""
response = self.send_command('use', keys={'sid': id})
return response.is_successful
def clientlist(self):
"""
Returns a clientlist of the current connected server/vhost
"""
response = self.send_command('clientlist')
if response.is_successful:
clientlist = {}
for client in response.data:
clientlist[client['clid']] = client
return clientlist
else:
            # TODO: Raise an exception?
self.logger.debug("clientlist - error retrieving client list")
return {}
def clientkick(self, clid=None, cldbid=None, type=REASON_KICK_SERVER, message=None):
"""
Kicks a user identified by either clid or cldbid
"""
client = None
if cldbid:
            clientlist = self.clientlist()
for cl in list(clientlist.values()):
if int(cl['client_database_id']) == cldbid:
client = cl['clid']
self.logger.debug("clientkick - identified user from clid (%s = %s)" % (cldbid, client))
break
if not client:
# TODO: we should throw an exception here actually
self.logger.debug("clientkick - no client with specified cldbid (%s) was found" % cldbid)
return False
elif clid:
client = clid
else:
raise InvalidArguments('No clid or cldbid provided')
if not message:
message = ''
else:
# Kick message can only be 40 characters
message = message[:40]
if client:
self.logger.debug("clientkick - Kicking clid %s" % client)
response = self.send_command('clientkick', keys={'clid': client, 'reasonid': type, 'reasonmsg': message})
return response.is_successful
return False
def clientpoke(self, clid, message):
"""
Poke a client with the specified message
"""
response = self.send_command('clientpoke', keys={'clid': clid, 'msg': message})
return response.is_successful
|
{
"content_hash": "b97cc749bd80d8386214c46fa0b0fb68",
"timestamp": "",
"source": "github",
"line_count": 383,
"max_line_length": 119,
"avg_line_length": 28.245430809399476,
"alnum_prop": 0.533092993159549,
"repo_name": "DownGoat/ts3_watcher",
"id": "e1df9d067d35adf713b8d87e54daabe63c8b6f53",
"size": "12403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ts3/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40768"
}
],
"symlink_target": ""
}
|
"""
quilt nose tests
project : quilt
version : 0.1.1
status : development
modifydate : 2015-05-13 07:09:00 -0700
createdate : 2015-04-28 07:23:00 -0700
website : https://github.com/tmthydvnprt/quilt
author : tmthydvnprt
email : tmthydvnprt@users.noreply.github.com
maintainer : tmthydvnprt
license : MIT
copyright : Copyright 2015, quilt
credits :
"""
__all__ = ['test']
|
{
"content_hash": "203779652a902ec4f09938c81ec6cb3d",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 49,
"avg_line_length": 21.210526315789473,
"alnum_prop": 0.674937965260546,
"repo_name": "tmthydvnprt/quilt",
"id": "f08cf46ed2f97aba38bc740e501cbfee9583865d",
"size": "403",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "115745"
},
{
"name": "Shell",
"bytes": "1227"
}
],
"symlink_target": ""
}
|
"""The main engine of every compiler engine pipeline, called MainEngine."""
import atexit
import sys
import traceback
import weakref
from projectq.backends import Simulator
from projectq.ops import Command, FlushGate
from projectq.types import WeakQubitRef
from ._basicmapper import BasicMapperEngine
from ._basics import BasicEngine
class NotYetMeasuredError(Exception):
"""Exception raised when trying to access the measurement value of a qubit that has not yet been measured."""
class UnsupportedEngineError(Exception):
"""Exception raised when a non-supported compiler engine is encountered."""
class _ErrorEngine: # pylint: disable=too-few-public-methods
"""
Fake compiler engine class.
Fake compiler engine class only used to ensure gracious failure when an exception occurs in the MainEngine
constructor.
"""
def receive(self, command_list): # pylint: disable=unused-argument
"""No-op."""
_N_ENGINES_THRESHOLD = 100
class MainEngine(BasicEngine): # pylint: disable=too-many-instance-attributes
"""
The MainEngine class provides all functionality of the main compiler engine.
It initializes all further compiler engines (calls, e.g., .next_engine=...) and keeps track of measurement results
and active qubits (and their IDs).
Attributes:
next_engine (BasicEngine): Next compiler engine (or the back-end).
main_engine (MainEngine): Self.
active_qubits (WeakSet): WeakSet containing all active qubits
dirty_qubits (Set): Containing all dirty qubit ids
backend (BasicEngine): Access the back-end.
mapper (BasicMapperEngine): Access to the mapper if there is one.
n_engines (int): Current number of compiler engines in the engine list
n_engines_max (int): Maximum number of compiler engines allowed in the engine list. Defaults to 100.
"""
def __init__( # pylint: disable=too-many-statements,too-many-branches
self, backend=None, engine_list=None, verbose=False
):
"""
Initialize the main compiler engine and all compiler engines.
Sets 'next_engine'- and 'main_engine'-attributes of all compiler engines and adds the back-end as the last
engine.
Args:
backend (BasicEngine): Backend to send the compiled circuit to.
engine_list (list<BasicEngine>): List of engines / backends to use as compiler engines. Note: The engine
list must not contain multiple mappers (instances of BasicMapperEngine).
Default: projectq.setups.default.get_engine_list()
verbose (bool): Either print full or compact error messages.
Default: False (i.e. compact error messages).
Example:
.. code-block:: python
from projectq import MainEngine
eng = MainEngine() # uses default engine_list and the Simulator
Instead of the default `engine_list` one can use, e.g., one of the IBM
setups which defines a custom `engine_list` useful for one of the IBM
chips
Example:
.. code-block:: python
import projectq.setups.ibm as ibm_setup
from projectq import MainEngine
eng = MainEngine(engine_list=ibm_setup.get_engine_list())
# eng uses the default Simulator backend
Alternatively, one can specify all compiler engines explicitly, e.g.,
Example:
.. code-block:: python
from projectq.cengines import (
TagRemover,
AutoReplacer,
LocalOptimizer,
DecompositionRuleSet,
)
from projectq.backends import Simulator
from projectq import MainEngine
rule_set = DecompositionRuleSet()
engines = [AutoReplacer(rule_set), TagRemover(), LocalOptimizer(3)]
eng = MainEngine(Simulator(), engines)
"""
super().__init__()
self.active_qubits = weakref.WeakSet()
self._measurements = {}
self.dirty_qubits = set()
self.verbose = verbose
self.main_engine = self
self.n_engines_max = _N_ENGINES_THRESHOLD
if backend is None:
backend = Simulator()
else: # Test that backend is BasicEngine object
if not isinstance(backend, BasicEngine):
self.next_engine = _ErrorEngine()
raise UnsupportedEngineError(
"\nYou supplied a backend which is not supported,\n"
"i.e. not an instance of BasicEngine.\n"
"Did you forget the brackets to create an instance?\n"
"E.g. MainEngine(backend=Simulator) instead of \n"
" MainEngine(backend=Simulator())"
)
self.backend = backend
# default engine_list is projectq.setups.default.get_engine_list()
if engine_list is None:
import projectq.setups.default # pylint: disable=import-outside-toplevel
engine_list = projectq.setups.default.get_engine_list()
self.mapper = None
if isinstance(engine_list, list):
# Test that engine list elements are all BasicEngine objects
for current_eng in engine_list:
if not isinstance(current_eng, BasicEngine):
self.next_engine = _ErrorEngine()
raise UnsupportedEngineError(
"\nYou supplied an unsupported engine in engine_list,"
"\ni.e. not an instance of BasicEngine.\n"
"Did you forget the brackets to create an instance?\n"
"E.g. MainEngine(engine_list=[AutoReplacer]) instead of\n"
" MainEngine(engine_list=[AutoReplacer()])"
)
if isinstance(current_eng, BasicMapperEngine):
if self.mapper is None:
self.mapper = current_eng
else:
self.next_engine = _ErrorEngine()
raise UnsupportedEngineError("More than one mapper engine is not supported.")
else:
self.next_engine = _ErrorEngine()
raise UnsupportedEngineError("The provided list of engines is not a list!")
engine_list = engine_list + [backend]
# Test that user did not supply twice the same engine instance
num_different_engines = len({id(item) for item in engine_list})
if len(engine_list) != num_different_engines:
self.next_engine = _ErrorEngine()
raise UnsupportedEngineError(
"\nError:\n You supplied twice the same engine as backend"
" or item in engine_list. This doesn't work. Create two \n"
" separate instances of a compiler engine if it is needed\n"
" twice.\n"
)
self.n_engines = len(engine_list)
if self.n_engines > self.n_engines_max:
raise ValueError('Too many compiler engines added to the MainEngine!')
self._qubit_idx = int(0)
for i in range(len(engine_list) - 1):
engine_list[i].next_engine = engine_list[i + 1]
engine_list[i].main_engine = self
engine_list[-1].main_engine = self
engine_list[-1].is_last_engine = True
self.next_engine = engine_list[0]
# In order to terminate an example code without eng.flush
def atexit_function(weakref_main_eng):
eng = weakref_main_eng()
if eng is not None:
if not hasattr(sys, "last_type"):
eng.flush(deallocate_qubits=True)
# An exception causes the termination, don't send a flush and make sure no qubits send deallocation
# gates anymore as this might trigger additional exceptions
else:
for qubit in eng.active_qubits:
qubit.id = -1
self._delfun = atexit_function
weakref_self = weakref.ref(self)
atexit.register(self._delfun, weakref_self)
def __del__(self):
"""
Destroy the main engine.
Flushes the entire circuit down the pipeline, clearing all temporary buffers (in, e.g., optimizers).
"""
if not hasattr(sys, "last_type"):
self.flush(deallocate_qubits=True)
try:
atexit.unregister(self._delfun) # only available in Python3
except AttributeError: # pragma: no cover
pass
def set_measurement_result(self, qubit, value):
"""
Register a measurement result.
The engine being responsible for measurement results needs to register these results with the master engine
such that they are available when the user calls an int() or bool() conversion operator on a measured qubit.
Args:
qubit (BasicQubit): Qubit for which to register the measurement result.
value (bool): Boolean value of the measurement outcome (True / False = 1 / 0 respectively).
"""
self._measurements[qubit.id] = bool(value)
def get_measurement_result(self, qubit):
"""
Return the classical value of a measured qubit, given that an engine registered this result previously.
See also set_measurement_result().
Args:
qubit (BasicQubit): Qubit of which to get the measurement result.
Example:
.. code-block:: python
from projectq.ops import H, Measure
from projectq import MainEngine
eng = MainEngine()
qubit = eng.allocate_qubit() # quantum register of size 1
H | qubit
Measure | qubit
eng.get_measurement_result(qubit[0]) == int(qubit)
"""
if qubit.id in self._measurements:
return self._measurements[qubit.id]
raise NotYetMeasuredError(
"\nError: Can't access measurement result for qubit #" + str(qubit.id) + ". The problem may be:\n\t"
"1. Your code lacks a measurement statement\n\t"
"2. You have not yet called engine.flush() to force execution of your code\n\t"
"3. The "
"underlying backend failed to register the measurement result\n"
)
def get_new_qubit_id(self):
"""
Return a unique qubit id to be used for the next qubit allocation.
Returns:
new_qubit_id (int): New unique qubit id.
"""
self._qubit_idx += 1
return self._qubit_idx - 1
def receive(self, command_list):
"""
Forward the list of commands to the first engine.
Args:
command_list (list<Command>): List of commands to receive (and
then send on)
"""
self.send(command_list)
def send(self, command_list):
"""
Forward the list of commands to the next engine in the pipeline.
It also shortens exception stack traces if self.verbose is False.
"""
try:
self.next_engine.receive(command_list)
except Exception as err: # pylint: disable=broad-except
if self.verbose:
raise
exc_type, exc_value, _ = sys.exc_info()
last_line = traceback.format_exc().splitlines()
compact_exception = exc_type(
str(exc_value) + '\n raised in:\n' + repr(last_line[-3]) + "\n" + repr(last_line[-2])
)
compact_exception.__cause__ = None
raise compact_exception from err # use verbose=True for more info
def flush(self, deallocate_qubits=False):
"""
Flush the entire circuit down the pipeline, clearing potential buffers (of, e.g., optimizers).
Args:
deallocate_qubits (bool): If True, deallocates all qubits that are still alive (invalidating references to
them by setting their id to -1).
"""
if deallocate_qubits:
while [qb for qb in self.active_qubits if qb is not None]:
qb = self.active_qubits.pop() # noqa: F841
qb.__del__() # pylint: disable=unnecessary-dunder-call
self.receive([Command(self, FlushGate(), ([WeakQubitRef(self, -1)],))])
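# A minimal usage sketch (not part of ProjectQ itself), mirroring the
# docstring examples above: allocate a qubit, apply a Hadamard gate, measure
# it and read the classical result after flushing.
if __name__ == "__main__":
    from projectq.ops import H, Measure

    eng = MainEngine()            # default Simulator backend and engine list
    qubit = eng.allocate_qubit()  # quantum register of size 1
    H | qubit
    Measure | qubit
    eng.flush()
    print("Measured:", int(qubit))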
|
{
"content_hash": "024831e2055f31f212f5bd1bea29a344",
"timestamp": "",
"source": "github",
"line_count": 312,
"max_line_length": 118,
"avg_line_length": 40.20192307692308,
"alnum_prop": 0.5951526747986925,
"repo_name": "ProjectQ-Framework/ProjectQ",
"id": "87ec1b3d3eab2a710e9679e3b8b77c8dba58b3f5",
"size": "13164",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "projectq/cengines/_main.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "158833"
},
{
"name": "Python",
"bytes": "1483141"
}
],
"symlink_target": ""
}
|
__doc__=''
__version__=''' $Id$ '''
#REPORTLAB_TEST_SCRIPT
import sys
from reportlab.platypus import *
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.rl_config import defaultPageSize
PAGE_HEIGHT=defaultPageSize[1]
styles = getSampleStyleSheet()
Title = "Integrating Diverse Data Sources with Gadfly 2"
Author = "Aaron Watters"
URL = "http://www.chordate.com/"
email = "arw@ifu.net"
Abstract = """This paper describes the primative methods underlying the implementation
of SQL query evaluation in Gadfly 2, a database management system implemented
in Python [Van Rossum]. The major design goals behind
the architecture described here are to simplify the implementation
and to permit flexible and efficient extensions to the gadfly
engine. Using this architecture and its interfaces programmers
can add functionality to the engine such as alternative disk based
indexed table implementations, dynamic interfaces to remote data
bases or other data sources, and user defined computations."""
from reportlab.lib.units import inch
pageinfo = "%s / %s / %s" % (Author, email, Title)
def myFirstPage(canvas, doc):
canvas.saveState()
#canvas.setStrokeColorRGB(1,0,0)
#canvas.setLineWidth(5)
#canvas.line(66,72,66,PAGE_HEIGHT-72)
canvas.setFont('Times-Bold',16)
canvas.drawString(108, PAGE_HEIGHT-108, Title)
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "First Page / %s" % pageinfo)
canvas.restoreState()
def myLaterPages(canvas, doc):
#canvas.drawImage("snkanim.gif", 36, 36)
canvas.saveState()
#canvas.setStrokeColorRGB(1,0,0)
#canvas.setLineWidth(5)
#canvas.line(66,72,66,PAGE_HEIGHT-72)
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Page %d %s" % (doc.page, pageinfo))
canvas.restoreState()
def go():
Elements.insert(0,Spacer(0,inch))
doc = SimpleDocTemplate('gfe.pdf')
doc.build(Elements,onFirstPage=myFirstPage, onLaterPages=myLaterPages)
Elements = []
HeaderStyle = styles["Heading1"] # XXXX
def header(txt, style=HeaderStyle, klass=Paragraph, sep=0.3):
s = Spacer(0.2*inch, sep*inch)
Elements.append(s)
para = klass(txt, style)
Elements.append(para)
ParaStyle = styles["Normal"]
def p(txt):
return header(txt, style=ParaStyle, sep=0.1)
#pre = p # XXX
PreStyle = styles["Code"]
def pre(txt):
s = Spacer(0.1*inch, 0.1*inch)
Elements.append(s)
p = Preformatted(txt, PreStyle)
Elements.append(p)
#header(Title, sep=0.1. style=ParaStyle)
header(Author, sep=0.1, style=ParaStyle)
header(URL, sep=0.1, style=ParaStyle)
header(email, sep=0.1, style=ParaStyle)
header("ABSTRACT")
p(Abstract)
header("Backgrounder")
p("""\
The term "database" usually refers to a persistent
collection of data. Data is persistent if it continues
to exist whether or not it is associated with a running
process on the computer, or even if the computer is
shut down and restarted at some future time. Database
management systems provide support for constructing databases,
maintaining databases, and extracting information from databases.""")
p("""\
Relational databases manipulate and store persistent
table structures called relations, such as the following
three tables""")
pre("""\
-- drinkers who frequent bars (this is a comment)
select * from frequents
DRINKER | PERWEEK | BAR
============================
adam | 1 | lolas
woody | 5 | cheers
sam | 5 | cheers
norm | 3 | cheers
wilt | 2 | joes
norm | 1 | joes
lola | 6 | lolas
norm | 2 | lolas
woody | 1 | lolas
pierre | 0 | frankies
""")
pre("""\
-- drinkers who like beers
select * from likes
DRINKER | PERDAY | BEER
===============================
adam | 2 | bud
wilt | 1 | rollingrock
sam | 2 | bud
norm | 3 | rollingrock
norm | 2 | bud
nan | 1 | sierranevada
woody | 2 | pabst
lola | 5 | mickies
""")
pre("""\
-- beers served from bars
select * from serves
BAR | QUANTITY | BEER
=================================
cheers | 500 | bud
cheers | 255 | samadams
joes | 217 | bud
joes | 13 | samadams
joes | 2222 | mickies
lolas | 1515 | mickies
lolas | 333 | pabst
winkos | 432 | rollingrock
frankies | 5 | snafu
""")
p("""
The relational model for database structures makes
the simplifying assumption that all data in a database
can be represented in simple table structures
such as these. Although this assumption seems extreme
it provides a good foundation for defining solid and
well defined database management systems and some
of the most successful software companies in the
world, such as Oracle, Sybase, IBM, and Microsoft,
have marketed database management systems based on
the relational model quite successfully.
""")
p("""
SQL stands for Structured Query Language.
The SQL language defines industry standard
mechanisms for creating, querying, and modifying
relational tables. Several years ago SQL was one
of many Relational Database Management System
(RDBMS) query languages in use, and many would
argue not the best one. Now, largely due
to standardization efforts and the
backing of IBM, SQL is THE standard way to talk
to database systems.
""")
p("""
There are many advantages SQL offers over other
database query languages and alternative paradigms
at this time (please see [O'Neill] or [Korth and Silberschatz]
for more extensive discussions and comparisons between the
SQL/relational approach and others.)
""")
p("""
The chief advantage over all contenders at this time
is that SQL and the relational model are now widely
used as interfaces and back end data stores to many
different products with different performance characteristics,
user interfaces, and other qualities: Oracle, Sybase,
Ingres, SQL Server, Access, Outlook,
Excel, IBM DB2, Paradox, MySQL, MSQL, POSTgres, and many
others. For this reason a program designed to use
an SQL database as its data storage mechanism can
easily be ported from one SQL data manager to another,
possibly on different platforms. In fact the same
program can seamlessly use several backends and/or
import/export data between different data base platforms
with trivial ease.
No other paradigm offers such flexibility at the moment.
""")
p("""
Another advantage which is not as immediately
obvious is that the relational model and the SQL
query language are easily understood by semi-technical
and non-technical professionals, such as business
people and accountants. Human resources managers
who would be terrified by an object model diagram
or a snippet of code that resembles a conventional
programming language will frequently feel quite at
ease with a relational model which resembles the
sort of tabular data they deal with on paper in
reports and forms on a daily basis. With a little training the
same HR managers may be able to translate the request
"Who are the drinkers who like bud and frequent cheers?"
into the SQL query
""")
pre("""
select drinker
from frequents
where bar='cheers'
and drinker in (
select drinker
from likes
where beer='bud')
""")
p("""
(or at least they have some hope of understanding
the query once it is written by a technical person
or generated by a GUI interface tool). Thus the use
of SQL and the relational model enables communication
between different communities which must understand
and interact with stored information. In contrast
many other approaches cannot be understood easily
by people without extensive programming experience.
""")
p("""
Furthermore the declarative nature of SQL
lends itself to automatic query optimization,
and engines such as Gadfly can automatically translate a user query
into an optimized query plan which takes
advantage of available indices and other data characteristics.
In contrast more navigational techniques require the application
program itself to optimize the accesses to the database and
explicitly make use of indices.
""")
# HACK
Elements.append(PageBreak())
p("""
While it must be admitted that there are application
domains such as computer aided engineering design where
the relational model is unnatural, it is also important
to recognize that for many application domains (such
as scheduling, accounting, inventory, finance, personal
information management, electronic mail) the relational
model is a very natural fit and the SQL query language
makes most accesses to the underlying data (even sophisticated
ones) straightforward. """)
p("""For an example of a moderately
sophisticated query using the tables given above,
the following query lists the drinkers who frequent lolas bar
and like at least two beers not served by lolas
""")
if 0:
go()
sys.exit(1)
pre("""
select f.drinker
from frequents f, likes l
where f.drinker=l.drinker and f.bar='lolas'
and l.beer not in
(select beer from serves where bar='lolas')
group by f.drinker
having count(distinct beer)>=2
""")
p("""
yielding the result
""")
pre("""
DRINKER
=======
norm
""")
p("""
Experience shows that queries of this sort are actually
quite common in many applications, and are often much more
difficult to formulate using some navigational database
organizations, such as some "object oriented" database
paradigms.
""")
p("""
Certainly,
SQL does not provide all you need to interact with
databases -- in order to do "real work" with SQL you
need to use SQL and at least one other language
(such as C, Pascal, C++, Perl, Python, TCL, Visual Basic
or others) to do work (such as readable formatting a report
from raw data) that SQL was not designed to do.
""")
header("Why Gadfly 1?")
p("""Gadfly 1.0 is an SQL based relational database implementation
implemented entirely in the Python programming language, with
optional fast data structure accellerators implemented in the
C programming language. Gadfly is relatively small, highly portable,
very easy to use (especially for programmers with previous experience
with SQL databases such as MS Access or Oracle), and reasonably
fast (especially when the kjbuckets C accelerators are used).
For moderate sized problems Gadfly offers a fairly complete
set of features such as transaction semantics, failure recovery,
and a TCP/IP based client/server mode (Please see [Gadfly] for
detailed discussion).""")
header("Why Gadfly 2?")
p("""Gadfly 1.0 also has significant limitations. An active Gadfly
1.0 database keeps all data in (virtual) memory, and hence a Gadfly
1.0 database is limited in size to available virtual memory. Important
features such as date/time/interval operations, regular expression
matching and other standard SQL features are not implemented in
Gadfly 1.0. The optimizer and the query evaluator perform optimizations
using properties of the equality predicate but do not optimize
using properties of inequalities such as BETWEEN or less-than.
It is possible to add "extension views" to a Gadfly
1.0 database, but the mechanism is somewhat clumsy and indices
over extension views are not well supported. The features of Gadfly
2.0 discussed here attempt to address these deficiencies by providing
a uniform extension model that permits addition of alternate table,
function, and predicate implementations.""")
p("""Other deficiencies, such as missing constructs like "ALTER
TABLE" and the lack of outer joins and NULL values are not
addressed here, although they may be addressed in Gadfly 2.0 or
a later release. This paper also does not intend to explain
the complete operations of the internals; it is intended to provide
at least enough information to understand the basic mechanisms
for extending gadfly.""")
p("""Some concepts and definitions provided next help with the description
of the gadfly interfaces. [Note: due to the terseness of this
format the ensuing is not a highly formal presentation, but attempts
to approach precision where precision is important.]""")
header("The semilattice of substitutions")
p("""Underlying the gadfly implementation are the basic concepts
associated with substitutions. A substitution is a mapping
of attribute names to values (implemented in gadfly using kjbuckets.kjDict
objects). Here an attribute refers to some sort of "descriptive
variable", such as NAME and a value is an assignment for that variable,
like "Dave Ascher". In Gadfly a table is implemented as a sequence
of substitutions, and substitutions are used in many other ways as well.
""")
p("""
For example consider the substitutions""")
pre("""
A = [DRINKER=>'sam']
B = [DRINKER=>'sam', BAR=>'cheers']
C = [DRINKER=>'woody', BEER=>'bud']
D = [DRINKER=>'sam', BEER=>'mickies']
E = [DRINKER=>'sam', BAR=>'cheers', BEER=>'mickies']
F = [DRINKER=>'sam', BEER=>'mickies']
G = [BEER=>'bud', BAR=>'lolas']
H = [] # the empty substitution
I = [BAR=>'cheers', CAPACITY=>300]""")
p("""A trivial but important observation is that since substitutions
are mappings, no attribute can assume more than one value in a
substitution. In the operations described below whenever an operator
"tries" to assign more than one value to an attribute
the operator yields an "overdefined" or "inconsistent"
result.""")
header("Information Semi-order:")
p("""Substitution B is said to be
more informative than A because B agrees with all assignments
in A (in addition to providing more information as well). Similarly
we say that E is more informative than A, B, D, F. and H but E
is not more informative than the others since, for example G disagrees
with E on the value assigned to the BEER attribute and I provides
additional CAPACITY information not provided in E.""")
header("Joins and Inconsistency:")
p("""A join of two substitutions
X and Y is the least informative substitution Z such that Z is
more informative (or equally informative) than both X and Y. For
example B is the join of B with A, E is the join of B with D and""")
pre("""
E join I =
[DRINKER=>'sam', BAR=>'cheers', BEER=>'mickies', CAPACITY=>300]""")
p("""For any two substitutions either (1) they disagree on the value
assigned to some attribute and have no join or (2) they agree
on all common attributes (if there are any) and their join is
the union of all (name, value) assignments in both substitutions.
Written in terms of kjbucket.kjDict operations two kjDicts X and
Y have a join Z = (X+Y) if and only if Z.Clean() is not None.
Two substitutions that have no join are said to be inconsistent.
For example I and G are inconsistent since they disagree on
the value assigned to the BAR attribute and therefore have no
join. The algebra of substitutions with joins technically defines
an abstract algebraic structure called a semilattice.""")
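p("""As a purely illustrative sketch (assuming the kjbuckets extension module
is installed) the join test described above can be written directly in
Python; B and D are the substitutions defined earlier:""")
pre("""
from kjbuckets import kjDict
B = kjDict([('DRINKER','sam'), ('BAR','cheers')])
D = kjDict([('DRINKER','sam'), ('BEER','mickies')])
E = B + D               # their join
E.Clean() is not None   # true: B and D are consistent
""")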
header("Name space remapping")
p("""Another primitive operation over substitutions is the remap
operation S2 = S.remap(R) where S is a substitution and R is a
graph of attribute names and S2 is a substitution. This operation
is defined to produce the substitution S2 such that""")
pre("""
Name=>Value in S2 if and only if
Name1=>Value in S and Name<=Name1 in R
""")
p("""or if there is no such substitution S2 the remap value is said
to be overdefined.""")
p("""For example the remap operation may be used to eliminate attributes
from a substitution. For example""")
pre("""
E.remap([DRINKER<=DRINKER, BAR<=BAR])
= [DRINKER=>'sam', BAR=>'cheers']
""")
p("""Illustrating that remapping using the [DRINKER<=DRINKER,
BAR<=BAR] graph eliminates all attributes except DRINKER and
BAR, such as BEER. More generally remap can be used in this way
to implement the classical relational projection operation. (See [Korth and Silberschatz]
for a detailed discussion of the projection operator and other relational
algebra operators such as selection, rename, difference and joins.)""")
p("""The remap operation can also be used to implement "selection
on attribute equality". For example if we are interested
in the employee names of employees who are their own bosses we
can use the remapping graph""")
pre("""
R1 = [NAME<=NAME, NAME<=BOSS]
""")
p("""and reject substitutions where remapping using R1 is overdefined.
For example""")
pre("""
S1 = [NAME=>'joe', BOSS=>'joe']
S1.remap(R1) = [NAME=>'joe']
S2 = [NAME=>'fred', BOSS=>'joe']
S2.remap(R1) is overdefined.
""")
p("""The last remap is overdefined because the NAME attribute cannot
assume both the values 'fred' and 'joe' in a substitution.""")
p("""Furthermore, of course, the remap operation can be used to
"rename attributes" or "copy attribute values"
in substitutions. Note below that the missing attribute CAPACITY
in B is effectively ignored in the remapping operation.""")
pre("""
B.remap([D<=DRINKER, B<=BAR, B2<=BAR, C<=CAPACITY])
= [D=>'sam', B=>'cheers', B2=>'cheers']
""")
p("""More interestingly, a single remap operation can be used to
perform a combination of renaming, projection, value copying,
and attribute equality selection as one operation. In kjbuckets the remapper
graph is implemented using a kjbuckets.kjGraph and the remap operation
is an intrinsic method of kjbuckets.kjDict objects.""")
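p("""Again as an illustrative sketch (not part of the gadfly sources), the
projection example above can be expressed with kjbuckets objects directly;
only identity pairs are used in the remapper graph:""")
pre("""
from kjbuckets import kjDict, kjGraph
E = kjDict([('DRINKER','sam'), ('BAR','cheers'), ('BEER','mickies')])
R = kjGraph([('DRINKER','DRINKER'), ('BAR','BAR')])
E.remap(R)   # [DRINKER=>'sam', BAR=>'cheers']
""")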
header("Generalized Table Joins and the Evaluator Mainloop""")
p("""Strictly speaking the Gadfly 2.0 query evaluator only uses
the join and remap operations as its "basic assembly language"
-- all other computations, including inequality comparisons and
arithmetic, are implemented externally to the evaluator as "generalized
table joins." """)
p("""A table is a sequence of substitutions (which in keeping with
SQL semantics may contain redundant entries). The join between
two tables T1 and T2 is the sequence of all possible defined joins
between pairs of elements from the two tables. Procedurally we
might compute the join as""")
pre("""
T1JoinT2 = empty
for t1 in T1:
for t2 in T2:
if t1 join t2 is defined:
add t1 join t2 to T1JoinT2""")
p("""In general circumstances this intuitive implementation is a
very inefficient way to compute the join, and Gadfly almost always
uses other methods, particularly since, as described below, a
"generalized table" can have an "infinite"
number of entries.""")
p("""For an example of a table join consider the EMPLOYEES table
containing""")
pre("""
[NAME=>'john', JOB=>'executive']
[NAME=>'sue', JOB=>'programmer']
[NAME=>'eric', JOB=>'peon']
[NAME=>'bill', JOB=>'peon']
""")
p("""and the ACTIVITIES table containing""")
pre("""
[JOB=>'peon', DOES=>'windows']
[JOB=>'peon', DOES=>'floors']
[JOB=>'programmer', DOES=>'coding']
[JOB=>'secretary', DOES=>'phone']""")
p("""then the join between EMPLOYEES and ACTIVITIES must containining""")
pre("""
[NAME=>'sue', JOB=>'programmer', DOES=>'coding']
[NAME=>'eric', JOB=>'peon', DOES=>'windows']
[NAME=>'bill', JOB=>'peon', DOES=>'windows']
[NAME=>'eric', JOB=>'peon', DOES=>'floors']
[NAME=>'bill', JOB=>'peon', DOES=>'floors']""")
p("""A compiled gadfly subquery ultimately appears to the evaluator
as a sequence of generalized tables that must be joined (in combination
with certain remapping operations that are beyond the scope of
this discussion). The Gadfly mainloop proceeds following the very
loose pseudocode:""")
pre("""
Subs = [ [] ] # the unary sequence containing "true"
While some table hasn't been chosen yet:
Choose an unchosen table with the least cost join estimate.
Subs = Subs joined with the chosen table
return Subs""")
p("""[Note that it is a property of the join operation that the
order in which the joins are carried out will not affect the result,
so the greedy strategy of evaluating the "cheapest join next"
will not affect the result. Also note that the treatment of logical
OR and NOT as well as EXIST, IN, UNION, and aggregation and so
forth are not discussed here, even though they do fit into this
approach.]""")
p("""The actual implementation is a bit more complex than this,
but the above outline may provide some useful intuition. The "cost
estimation" step and the implementation of the join operation
itself are left up to the generalized table object implementation.
A table implementation has the ability to give an "infinite"
cost estimate, which essentially means "don't join me in
yet under any circumstances." """)
header("Implementing Functions")
p("""As mentioned above operations such as arithmetic are implemented
using generalized tables. For example the arithmetic Add operation
is implemented in Gadfly internally as an "infinite generalized
table" containing all possible substitutions""")
pre("""
[ARG0=>a, ARG1=>b, RESULT=>a+b]
""")
p("""Where a and b are all possible values which can be summed.
Clearly, it is not possible to enumerate this table, but given
a sequence of substitutions with defined values for ARG0 and ARG1
such as""")
pre("""
[ARG0=>1, ARG1=>-4]
[ARG0=>2.6, ARG1=>50]
[ARG0=>99, ARG1=>1]
""")
p("""it is possible to implement a "join operation" against
this sequence that performs the same augmentation as a join with
the infinite table defined above:""")
pre("""
[ARG0=>1, ARG1=>-4, RESULT=>-3]
[ARG0=>2.6, ARG1=>50, RESULT=>52.6]
[ARG0=>99, ARG1=>1, RESULT=>100]
""")
p("""Furthermore by giving an "infinite estimate" for
all attempts to evaluate the join where ARG0 and ARG1 are not
available the generalized table implementation for the addition
operation can refuse to compute an "infinite join." """)
p("""More generally all functions f(a,b,c,d) are represented in
gadfly as generalized tables containing all possible relevant
entries""")
pre("""
[ARG0=>a, ARG1=>b, ARG2=>c, ARG3=>d, RESULT=>f(a,b,c,d)]""")
p("""and the join estimation function refuses all attempts to perform
a join unless all the arguments are provided by the input substitution
sequence.""")
header("Implementing Predicates")
p("""Similarly to functions, predicates such as less-than and BETWEEN
and LIKE are implemented using the generalized table mechanism.
For example the "x BETWEEN y AND z" predicate is implemented
as a generalized table "containing" all possible""")
pre("""
[ARG0=>a, ARG1=>b, ARG2=>c]""")
p("""where b<a<c. Furthermore joins with this table are not
permitted unless all three arguments are available in the sequence
of input substitutions.""")
header("Some Gadfly extension interfaces")
p("""A gadfly database engine may be extended with user defined
functions, predicates, and alternative table and index implementations.
This section snapshots several Gadfly 2.0 interfaces, currently under
development and likely to change before the package is released.""")
p("""The basic interface for adding functions and predicates (logical tests)
to a gadfly engine is relatively straightforward. For example to add the
ability to match a regular expression within a gadfly query use the
following implementation.""")
pre("""
from re import match
def addrematch(gadflyinstance):
gadflyinstance.add_predicate("rematch", match)
""")
p("""
Then upon connecting to the database execute
""")
pre("""
g = gadfly(...)
...
addrematch(g)
""")
p("""
In this case the "semijoin operation" associated with the new predicate
"rematch" is automatically generated, and after the add_predicate
binding operation the gadfly instance supports queries such as""")
pre("""
select drinker, beer
from likes
where rematch('b*', beer) and drinker not in
(select drinker from frequents where rematch('c*', bar))
""")
p("""
By embedding the "rematch" operation within the query the SQL
engine can do "more work" for the programmer and reduce or eliminate the
need to process the query result externally to the engine.
""")
p("""
In a similar manner functions may be added to a gadfly instance,""")
pre("""
def modulo(x,y):
return x % y
def addmodulo(gadflyinstance):
gadflyinstance.add_function("modulo", modulo)
...
g = gadfly(...)
...
addmodulo(g)
""")
p("""
Then, after the binding, the modulo function can be used wherever
an SQL expression can occur.
""")
p("""
Adding alternative table implementations to a Gadfly instance
is more interesting and more difficult. An "extension table" implementation
must conform to the following interface:""")
pre("""
# get the kjbuckets.kjSet set of attribute names for this table
names = table.attributes()
# estimate the difficulty of evaluating a join given known attributes
# return None for "impossible" or n>=0 otherwise with larger values
# indicating greater difficulty or expense
estimate = table.estimate(known_attributes)
# return the join of the rows of the table with
# the list of kjbuckets.kjDict mappings as a list of mappings.
resultmappings = table.join(listofmappings)
""")
p("""
In this case add the table to a gadfly instance using""")
pre("""
gadflyinstance.add_table("table_name", table)
""")
p("""
For example to add a table which automatically queries filenames
in the filesystems of the host computer a gadfly instance could
be augmented with a GLOB table implemented using the standard
library function glob.glob as follows:""")
pre("""
import kjbuckets
class GlobTable:
def __init__(self): pass
def attributes(self):
return kjbuckets.kjSet("PATTERN", "NAME")
def estimate(self, known_attributes):
if known_attributes.member("PATTERN"):
return 66 # join not too difficult
else:
return None # join is impossible (must have PATTERN)
def join(self, listofmappings):
from glob import glob
result = []
for m in listofmappings:
pattern = m["PATTERN"]
for name in glob(pattern):
newmapping = kjbuckets.kjDict(m)
newmapping["NAME"] = name
if newmapping.Clean():
result.append(newmapping)
return result
...
gadfly_instance.add_table("GLOB", GlobTable())
""")
p("""
Then one could formulate queries such as "list the files in directories
associated with packages installed by guido"
""")
pre("""
select g.name as filename
from packages p, glob g
where p.installer = 'guido' and g.pattern=p.root_directory
""")
p("""
Note that conceptually the GLOB table is an infinite table including
all filenames on the current computer in the "NAME" column, paired with
a potentially infinite number of patterns.
""")
p("""
More interesting examples would allow queries to remotely access
data served by an HTTP server, or from any other resource.
""")
p("""
Furthermore an extension table can be augmented with update methods
""")
pre("""
table.insert_rows(listofmappings)
table.update_rows(oldlist, newlist)
table.delete_rows(oldlist)
""")
p("""
Note: at present the implementation does not enforce recovery or
transaction semantics for updates to extension tables, although this
may change in the final release.
""")
p("""
The table implementation is free to provide its own implementations of
indices which take advantage of data provided by the join argument.
""")
header("Efficiency Notes")
p("""The following thought experiment attempts to explain why the
Gadfly implementation is surprisingly fast considering that it
is almost entirely implemented in Python (an interpreted programming
language which is not especially fast when compared to alternatives).
Although Gadfly is quite complex, at an abstract level the process
of query evaluation boils down to a series of embedded loops.
Consider the following nested loops:""")
pre("""
iterate 1000:
f(...) # fixed cost of outer loop
iterate 10:
g(...) # fixed cost of middle loop
iterate 10:
# the real work (string parse, matrix mul, query eval...)
h(...)""")
p("""In my experience many computations follow this pattern where
f, g, are complex, dynamic, special purpose and h is simple, general
purpose, static. Some example computations that follow this pattern
include: file massaging (perl), matrix manipulation (python, tcl),
database/cgi page generation, and vector graphics/imaging.""")
p("""Suppose implementing f, g, h in python is easy but result in
execution times10 times slower than a much harder implementation
in C, choosing arbitrary and debatable numbers assume each function
call consumes 1 tick in C, 5 ticks in java, 10 ticks in python
for a straightforward implementation of each function f, g, and
h. Under these conditions we get the following cost analysis,
eliminating some uninteresting combinations, of implementing the
function f, g, and h in combinations of Python, C and java:""")
pre("""
COST | FLANG | GLANG | HLANG
==================================
111000 | C | C | C
115000 | java | C | C
120000 | python | C | C
155000 | java | java | C
210000 | python | python | C
555000 | java | java | java
560000 | python | java | java
610000 | python | python | java
1110000 | python | python | python
""")
p("""Note that moving only the innermost loop to C (python/python/C)
speeds up the calculation by half an order of magnitude compared
to the python-only implementation and brings the speed to within
a factor of 2 of an implementation done entirely in C.""")
p("""Although this artificial and contrived thought experiment is
far from conclusive, we may be tempted to draw the conclusion
that generally programmers should focus first on obtaining a working
implementation (because as John Ousterhout is reported to have
said "the biggest performance improvement is the transition
from non-working to working") using the methodology that
is most likely to obtain a working solution the quickest (Python). Only then, if the performance
is inadequate, should the programmer focus on optimizing
the inner most loops, perhaps moving them to a very efficient
implementation (C). Optimizing the outer loops will buy little
improvement, and should be done later, if ever.""")
p("""This was precisely the strategy behind the gadfly implementations,
where most of the inner loops are implemented in the kjbuckets
C extension module and the higher level logic is all in Python.
This also explains why gadfly appears to be "slower"
for simple queries over small data sets, but seems to be relatively
"faster" for more complex queries over larger data sets,
since larger queries and data sets take better advantage of the
optimized inner loops.""")
header("A Gadfly variant for OLAP?")
p("""In private correspondence Andy Robinson points out that the
basic logical design underlying Gadfly could be adapted to provide
Online Analytical Processing (OLAP) and other forms of data warehousing
and data mining. Since SQL is not particularly well suited for
the kinds of requests common in these domains the higher level
interfaces would require modification, but the underlying logic
of substitutions and name mappings seems to be appropriate.""")
header("Conclusion")
p("""The revamped query engine design in Gadfly 2 supports
a flexible and general extension methodology that permits programmers
to extend the gadfly engine to include additional computations
and access to remote data sources. Among other possibilities this
will permit the gadfly engine to make use of disk based indexed
tables and to dynamically retrieve information from remote data
sources (such as an Excel spreadsheet or an Oracle database).
These features will make gadfly a very useful tool for data manipulation
and integration.""")
header("References")
p("""[Van Rossum] Van Rossum, Python Reference Manual, Tutorial, and Library Manuals,
please look to http://www.python.org
for the latest versions, downloads and links to printed versions.""")
p("""[O'Neill] O'Neill, P., Data Base Principles, Programming, Performance,
Morgan Kaufmann Publishers, San Francisco, 1994.""")
p("""[Korth and Silberschatz] Korth, H. and Silberschatz, A. and Sudarshan, S.
Data Base System Concepts, McGraw-Hill Series in Computer Science, Boston,
1997.""")
p("""[Gadfly]Gadfly: SQL Relational Database in Python,
http://www.chordate.com/kwParsing/gadfly.html""")
go()
|
{
"content_hash": "ade2e0abe63b1911e0d6c91904df53ac",
"timestamp": "",
"source": "github",
"line_count": 900,
"max_line_length": 95,
"avg_line_length": 36,
"alnum_prop": 0.7303703703703703,
"repo_name": "makinacorpus/reportlab-ecomobile",
"id": "836ed5475f796e2ea75aed2dbe5d7f42db2204c3",
"size": "32480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/gadflypaper/gfe.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "764229"
},
{
"name": "C++",
"bytes": "2019"
},
{
"name": "Java",
"bytes": "6333"
},
{
"name": "Python",
"bytes": "2863462"
},
{
"name": "Shell",
"bytes": "1783"
}
],
"symlink_target": ""
}
|
'''
Common namespaces
Copyright 2012-2020 Codinuum Software Lab <https://codinuum.com>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
# Fortran namespaces added by Masatomo Hashimoto <m.hashimoto@riken.jp>
XSD_NS = 'http://www.w3.org/2001/XMLSchema#'
RDF_NS = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
OWL_NS = 'http://www.w3.org/2002/07/owl#'
FB_NS = 'http://codinuum.com/fb/'
GUARD_NS = 'http://codinuum.com/fact/guard/'
SRC_NS = 'http://codinuum.com/ontologies/2012/10/source-code-entity#'
VER_NS = 'http://codinuum.com/ontologies/2012/10/versioning#'
CHG_NS = 'http://codinuum.com/ontologies/2012/10/primitive-change#'
CLONE_NS = 'http://codinuum.com/ontologies/2013/01/clone#'
CCFX_NS = 'http://codinuum.com/ontologies/2015/02/ccfx#'
SOOT_NS = 'http://codinuum.com/ontologies/2016/05/soot#'
DELTA_NS = 'http://codinuum.com/ontologies/2016/05/delta#'
CPP_NS = 'http://codinuum.com/ontologies/2019/02/cpp-entity#'
C_NS = 'http://codinuum.com/ontologies/2012/10/c-entity#'
JAVA_NS = 'http://codinuum.com/ontologies/2012/10/java-entity#'
V_NS = 'http://codinuum.com/ontologies/2012/10/verilog-entity#'
PY_NS = 'http://codinuum.com/ontologies/2012/10/python-entity#'
F_NS = 'http://codinuum.com/ontologies/2013/05/fortran-entity#'
PA_NS = 'http://codinuum.com/ontologies/2013/05/performance-analysis#'
FJPA_NS = 'http://codinuum.com/ontologies/2013/05/fujitsu-performance-analysis#'
FPT_NS = 'http://codinuum.com/ontologies/2013/08/fortran-performance-tuning#'
GIT_NS = 'http://codinuum.com/ontologies/2014/06/git#'
SVN_NS = 'http://codinuum.com/svn/fact/predicate#'
NCC_NS = 'http://codinuum.com/ontologies/2014/06/ncc#'
ICFGC_NS = 'http://codinuum.com/ontologies/2014/08/interprocedural-control-flow-c#'
MET_NS = 'http://codinuum.com/ontologies/2014/09/source-code-metrics#'
REF_NS = 'http://codinuum.com/ontologies/2015/10/refactoring#'
JREF_NS = 'http://codinuum.com/ontologies/2015/10/java-refactoring#'
CREF_NS = 'http://codinuum.com/ontologies/2015/10/c-refactoring#'
def make_guard_ns(ns):
return GUARD_NS+'?orig='+ns
# instances
MISSING_ENT_NS = 'http://codinuum.com/fact/missing-entity/'
PREFIX_TBL = {
'xsd': XSD_NS,
'rdf': RDF_NS,
'owl': OWL_NS,
'fb': FB_NS,
'src': SRC_NS,
'ver': VER_NS,
'chg': CHG_NS,
'clone': CLONE_NS,
'ccfx': CCFX_NS,
'soot': SOOT_NS,
'delta': DELTA_NS,
'c': C_NS,
'cpp': CPP_NS,
'java': JAVA_NS,
'v': V_NS,
'py': PY_NS,
'f': F_NS,
'pa': PA_NS,
'fjpa': FJPA_NS,
'fpt': FPT_NS,
'met': MET_NS,
'git': GIT_NS,
'ncc': NCC_NS,
'icfgc': ICFGC_NS,
'guard': GUARD_NS,
'ref': REF_NS,
'jref': JREF_NS,
'cref': CREF_NS,
'ent': 'http://codinuum.com/fact/entity/',
'ext': 'http://codinuum.com/fact/external-name/',
'bid': 'http://codinuum.com/fact/binding/',
'rel': 'http://codinuum.com/fact/version/release/',
'svnrev': 'http://codinuum.com/fact/version/svn/revision/',
'gitrev': 'http://codinuum.com/fact/version/git/revision/',
'variant': 'http://codinuum.com/fact/version/variant/',
'fjpadata': 'http://codinuum.com/fact/padata/fujitsu/',
'entpair': 'http://codinuum.com/fact/entity-pair/',
'chgpat': 'http://codinuum.com/fact/change-pattern/',
'chginst': 'http://codinuum.com/fact/change/',
'missing': MISSING_ENT_NS,
'gsrc': make_guard_ns(SRC_NS),
}
NS_TBL = {}
for (k, v) in PREFIX_TBL.items():
NS_TBL[k+'_ns'] = v
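# A minimal usage sketch (not part of this module): expanding a prefixed name
# into a full URI using PREFIX_TBL.  The helper and the local name below are
# illustrative only.
def expand_qname(qname):
    prefix, local = qname.split(':', 1)
    return PREFIX_TBL[prefix] + local
# expand_qname('java:MethodInvocation')
# -> 'http://codinuum.com/ontologies/2012/10/java-entity#MethodInvocation'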
|
{
"content_hash": "f8fe1024acf108bdd6bcd904c594b136",
"timestamp": "",
"source": "github",
"line_count": 120,
"max_line_length": 83,
"avg_line_length": 33.55833333333333,
"alnum_prop": 0.6550782220014899,
"repo_name": "codinuum/cca",
"id": "7e44d5a650ffc4cd16f5edc8ed6aa670e59bd248",
"size": "4051",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/src/cca/ccautil/ns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "14313"
},
{
"name": "Dockerfile",
"bytes": "2875"
},
{
"name": "HTML",
"bytes": "3595"
},
{
"name": "JavaScript",
"bytes": "97522"
},
{
"name": "Makefile",
"bytes": "48422"
},
{
"name": "OCaml",
"bytes": "8499239"
},
{
"name": "Python",
"bytes": "379461"
},
{
"name": "Shell",
"bytes": "13648"
},
{
"name": "Standard ML",
"bytes": "84941"
},
{
"name": "Verilog",
"bytes": "129"
}
],
"symlink_target": ""
}
|
import os
import errno
import urlparse
import itertools
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, SuspiciousOperation
from django.core.files import locks, File
from django.core.files.move import file_move_safe
from django.utils.encoding import force_unicode
from django.utils.functional import LazyObject
from django.utils.importlib import import_module
from django.utils.text import get_valid_filename
from django.utils._os import safe_join
__all__ = ('Storage', 'FileSystemStorage', 'DefaultStorage', 'default_storage')
class Storage(object):
"""
A base storage class, providing some default behaviors that all other
storage systems can inherit or override, as necessary.
"""
# The following methods represent a public interface to private methods.
# These shouldn't be overridden by subclasses unless absolutely necessary.
def open(self, name, mode='rb', mixin=None):
"""
Retrieves the specified file from storage, using the optional mixin
class to customize what features are available on the File returned.
"""
file = self._open(name, mode)
if mixin:
# Add the mixin as a parent class of the File returned from storage.
file.__class__ = type(mixin.__name__, (mixin, file.__class__), {})
return file
def save(self, name, content):
"""
Saves new content to the file specified by name. The content should be a
proper File object, ready to be read from the beginning.
"""
# Get the proper name for the file, as it will actually be saved.
if name is None:
name = content.name
name = self.get_available_name(name)
name = self._save(name, content)
# Store filenames with forward slashes, even on Windows
return force_unicode(name.replace('\\', '/'))
# These methods are part of the public API, with default implementations.
def get_valid_name(self, name):
"""
Returns a filename, based on the provided filename, that's suitable for
use in the target storage system.
"""
return get_valid_filename(name)
def get_available_name(self, name):
"""
Returns a filename that's free on the target storage system, and
available for new content to be written to.
"""
dir_name, file_name = os.path.split(name)
file_root, file_ext = os.path.splitext(file_name)
# If the filename already exists, add an underscore and a number (before
# the file extension, if one exists) to the filename until the generated
# filename doesn't exist.
count = itertools.count(1)
while self.exists(name):
# file_ext includes the dot.
name = os.path.join(dir_name, "%s_%s%s" % (file_root, count.next(), file_ext))
return name
def path(self, name):
"""
Returns a local filesystem path where the file can be retrieved using
Python's built-in open() function. Storage systems that can't be
accessed using open() should *not* implement this method.
"""
raise NotImplementedError("This backend doesn't support absolute paths.")
# The following methods form the public API for storage systems, but with
# no default implementations. Subclasses must implement *all* of these.
def delete(self, name):
"""
Deletes the specified file from the storage system.
"""
raise NotImplementedError()
def exists(self, name):
"""
Returns True if a file referenced by the given name already exists in the
storage system, or False if the name is available for a new file.
"""
raise NotImplementedError()
def listdir(self, path):
"""
Lists the contents of the specified path, returning a 2-tuple of lists;
the first item being directories, the second item being files.
"""
raise NotImplementedError()
def size(self, name):
"""
Returns the total size, in bytes, of the file specified by name.
"""
raise NotImplementedError()
def url(self, name):
"""
Returns an absolute URL where the file's contents can be accessed
directly by a Web browser.
"""
raise NotImplementedError()
class FileSystemStorage(Storage):
"""
Standard filesystem storage
"""
def __init__(self, location=None, base_url=None):
if location is None:
location = settings.MEDIA_ROOT
if base_url is None:
base_url = settings.MEDIA_URL
self.location = os.path.abspath(location)
self.base_url = base_url
def _open(self, name, mode='rb'):
return File(open(self.path(name), mode))
def _save(self, name, content):
full_path = self.path(name)
directory = os.path.dirname(full_path)
if not os.path.exists(directory):
os.makedirs(directory)
elif not os.path.isdir(directory):
raise IOError("%s exists and is not a directory." % directory)
# There's a potential race condition between get_available_name and
# saving the file; it's possible that two threads might return the
# same name, at which point all sorts of fun happens. So we need to
# try to create the file, but if it already exists we have to go back
# to get_available_name() and try again.
while True:
try:
# This file has a file path that we can move.
if hasattr(content, 'temporary_file_path'):
file_move_safe(content.temporary_file_path(), full_path)
content.close()
# This is a normal uploadedfile that we can stream.
else:
# This fun binary flag incantation makes os.open throw an
# OSError if the file already exists before we open it.
fd = os.open(full_path, os.O_WRONLY | os.O_CREAT | os.O_EXCL | getattr(os, 'O_BINARY', 0))
try:
locks.lock(fd, locks.LOCK_EX)
for chunk in content.chunks():
os.write(fd, chunk)
finally:
locks.unlock(fd)
os.close(fd)
except OSError, e:
if e.errno == errno.EEXIST:
# Ooops, the file exists. We need a new file name.
name = self.get_available_name(name)
full_path = self.path(name)
else:
raise
else:
# OK, the file save worked. Break out of the loop.
break
if settings.FILE_UPLOAD_PERMISSIONS is not None:
os.chmod(full_path, settings.FILE_UPLOAD_PERMISSIONS)
return name
def delete(self, name):
name = self.path(name)
# If the file exists, delete it from the filesystem.
if os.path.exists(name):
os.remove(name)
def exists(self, name):
return os.path.exists(self.path(name))
def listdir(self, path):
path = self.path(path)
directories, files = [], []
for entry in os.listdir(path):
if os.path.isdir(os.path.join(path, entry)):
directories.append(entry)
else:
files.append(entry)
return directories, files
def path(self, name):
try:
path = safe_join(self.location, name)
except ValueError:
raise SuspiciousOperation("Attempted access to '%s' denied." % name)
return os.path.normpath(path)
def size(self, name):
return os.path.getsize(self.path(name))
def url(self, name):
if self.base_url is None:
raise ValueError("This file is not accessible via a URL.")
return urlparse.urljoin(self.base_url, name).replace('\\', '/')
def get_storage_class(import_path=None):
if import_path is None:
import_path = settings.DEFAULT_FILE_STORAGE
try:
dot = import_path.rindex('.')
except ValueError:
raise ImproperlyConfigured("%s isn't a storage module." % import_path)
module, classname = import_path[:dot], import_path[dot+1:]
try:
mod = import_module(module)
except ImportError, e:
raise ImproperlyConfigured('Error importing storage module %s: "%s"' % (module, e))
try:
return getattr(mod, classname)
except AttributeError:
raise ImproperlyConfigured('Storage module "%s" does not define a "%s" class.' % (module, classname))
class DefaultStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class()()
default_storage = DefaultStorage()
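# A minimal usage sketch (not part of Django itself), assuming MEDIA_ROOT and
# MEDIA_URL are configured; the file name and contents are illustrative only.
#
#   from django.core.files.base import ContentFile
#   name = default_storage.save('hello.txt', ContentFile('hello world'))
#   default_storage.exists(name)       # True
#   default_storage.open(name).read()  # 'hello world'
#   default_storage.url(name)          # MEDIA_URL + name
#   default_storage.delete(name)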
|
{
"content_hash": "b07edf451fe5fc95ae6581696c939514",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 110,
"avg_line_length": 36.59016393442623,
"alnum_prop": 0.6057347670250897,
"repo_name": "350dotorg/Django",
"id": "17e694745020ece3144e962cf924e8c3c7003ae1",
"size": "8928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/core/files/storage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "91594"
},
{
"name": "Python",
"bytes": "6070525"
},
{
"name": "Shell",
"bytes": "5799"
}
],
"symlink_target": ""
}
|
import sys
def do_start(logger,csp_soap_client,cucm_variable_axl,cspconfigfile):
# *------------------------------------------------------------------
# * function do(logger,csp_soap_client,cucm_variable_axl)
# *
# * Copyright (C) 2016 Carlos Sanz <carlos.sanzpenas@gmail.com>
# *
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# *------------------------------------------------------------------
# *
# Mandatory (pattern,usage,routePartitionName)
# Start the LDAP synchronization
logger.debug('Entered function do_start of cspaxl_LdapSync.py')
logger.info('Starting LDAP synchronization: %s' % cucm_variable_axl)
try:
csp_ldap = {'name': cucm_variable_axl}
result = csp_soap_client.service.doLdapSync(csp_ldap,sync=0)
except:
logger.debug(sys.exc_info())
logger.error(sys.exc_info()[1])
return {'Status': False, 'Detail': sys.exc_info()[1]}
else:
return {'Status':True,'Detail':result['return']}
def do_cancel(logger,csp_soap_client,cucm_variable_axl):
# *------------------------------------------------------------------
# * function do(logger,csp_soap_client,cucm_variable_axl)
# *
# * Copyright (C) 2016 Carlos Sanz <carlos.sanzpenas@gmail.com>
# *
# * This program is free software; you can redistribute it and/or
# * modify it under the terms of the GNU General Public License
# * as published by the Free Software Foundation; either version 2
# * of the License, or (at your option) any later version.
# *
# * This program is distributed in the hope that it will be useful,
# * but WITHOUT ANY WARRANTY; without even the implied warranty of
# * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# * GNU General Public License for more details.
# *
# * You should have received a copy of the GNU General Public License
# * along with this program; if not, write to the Free Software
# * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# *------------------------------------------------------------------
# *
    # Mandatory (name of the LDAP directory)
    # Cancel the LDAP synchronization
try:
result = csp_soap_client.service.doLdapSync(name=cucm_variable_axl,sync='false')
except:
logger.debug(sys.exc_info())
logger.error(sys.exc_info()[1])
return {'Status': False, 'Detail': sys.exc_info()[1]}
else:
return {'Status':True,'Detail':result['return']}
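# --- Illustrative usage (added; not part of the original module) ---
# A minimal sketch of how do_start() is expected to be wired up. The suds client,
# the CUCM AXL WSDL URL, the credentials and the LDAP directory name below are
# placeholders; any SOAP client exposing .service.doLdapSync would work.
if __name__ == '__main__':
    import logging
    from suds.client import Client  # assumed SOAP library, not imported by the original module
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('cspaxl_LdapSync')
    soap_client = Client('https://cucm.example.com:8443/axl/?wsdl',
                         username='axl_user', password='axl_password')
    print(do_start(logger, soap_client, 'LDAP_Directory_1', cspconfigfile=None))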
|
{
"content_hash": "58119bdee3f295b91199f576310f57b2",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 88,
"avg_line_length": 46.25,
"alnum_prop": 0.609009009009009,
"repo_name": "sanzcarlos/CiscoCollab",
"id": "a626d0b43db19a652247a76deb07a94c4958e729",
"size": "4386",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CiscoAXL/cspaxl_LdapSync.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "184591"
}
],
"symlink_target": ""
}
|
import datetime as dt
import enum
import sqlalchemy as sa
from . import app, db
meta = sa.MetaData()
Category = sa.Table(
'category', meta,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String(255), nullable=False),
)
class Statuses(enum.Enum):
available = 'available'
pending = 'pending'
sold = 'sold'
Pet = sa.Table(
'pet', meta,
sa.Column('id', sa.Integer, primary_key=True),
sa.Column('name', sa.String(255), nullable=False),
sa.Column('image', sa.String(255), nullable=False),
sa.Column('status', sa.Enum(Statuses), nullable=False),
sa.Column('category_id', sa.ForeignKey('category.id'), nullable=False),
)
# don't do on production, this is only for the example
@app.on_startup
async def create_schema():
meta.create_all(sa.create_engine(db.cfg.URL))
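# --- Illustrative query (added; not part of the original example) ---
# A minimal sketch of querying the core tables above once create_schema() has
# run; it reuses the same db.cfg.URL expression the example already relies on,
# and the 1.x-style select([...]) matches the rest of the file.
def list_available_pets():
    engine = sa.create_engine(db.cfg.URL)
    query = (
        sa.select([Pet.c.name, Category.c.name.label('category')])
        .select_from(Pet.join(Category, Pet.c.category_id == Category.c.id))
        .where(Pet.c.status == Statuses.available)
    )
    with engine.connect() as conn:
        # Each row carries the selected columns: (pet name, category name).
        return [tuple(row) for row in conn.execute(query)]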
|
{
"content_hash": "1bbae5ad84cf2cfb75dce3972b99d404",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 23.97142857142857,
"alnum_prop": 0.66626936829559,
"repo_name": "klen/muffin-rest",
"id": "4551d50a9ba74adb2651de86c75033abcb53765a",
"size": "839",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "examples/sqlalchemy_core/tables.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4617"
},
{
"name": "Makefile",
"bytes": "1668"
},
{
"name": "Python",
"bytes": "104650"
}
],
"symlink_target": ""
}
|
from solpy import irradiation
from caelum import eere
# import eree
import datetime
def yearly_total_irr(place, az, tr): #, interval=30, ccd=None
"""Function which estimates the total irradiation.
Input: location (lat, lon),
az (azimuth in degrees, south is at 180 degrees),
tr (tilt of the roof in degrees, flat roof is 0),
#interval (what is the precision of the integration in minutes),
#cloud cover data (dictionary with floats from 0 to 1, for each day of the year in mmdd format (e.g. '1231');
get it from your local weather station).
Returns total yearly irradiation for the tilted and oriented surface in kWh/m^2.
"""
#-- Old method with KNMI data
#-- Counter for the yearly irradiation in kWh/m^2
# yearly_sum = 0
# #-- Compute for all dates and times
# for month in range(1, 13):
# for day in range(1, 32):
# #-- Skip these dates
# if (day == 29 or day == 30) and month == 2:
# continue
# if day == 31 and month in (2, 4, 6, 9, 11):
# continue
# #-- Daily value reset
# daily_rads = 0
# #-- Tweaking to get the proper key values for dates
# if month < 10:
# m_s = '0' + str(month)
# else:
# m_s = str(month)
# if day < 10:
# d_s = '0' + str(day)
# else:
# d_s = str(day)
# d = datetime.date(2013, month, day)
# #-- These are UTC times. The program is not smart enough to use sunrise and sunset times, but this works too
# for hour in range(3, 20):
# for minute in range(0, 60, interval):
# #-- Datetime
# t = datetime.time(hour, minute)
# dt = datetime.datetime.combine(d, t)
# #-- Get the historic cloud cover for that day
# if ccd:
# cloud_cover = ccd[str(m_s)+str(d_s)]
# else:
# cloud_cover = 0.0
# #-- Global synthetic irradiation from Solpy
# global_irradiation_rec = irradiation.blave(dt, place, 0, 180, cloud_cover)
# #-- Adjust it for the tilt. The value is now in W/m^2
# irrValue = irradiation.irradiation(global_irradiation_rec, place, None, tr, az, 'p9')
# #-- Integrate the value over the time interval (get Wh/m^2) and convert it to kWh/m^2 and add it to the daily summed value
# daily_rads += (irrValue * (float(interval)/60.0)) / 1000.0
# #-- When finished with the day, add the estimated value to the yearly sum
# yearly_sum += daily_rads
#-- EPW Weather data
STATION_CODE = '062400' # '062400' for Amsterdam
#-- Fetch the dataset thanks to the caelum library
records = eere.EPWdata(STATION_CODE)
#-- Get the global yearly irradiance (Wh/m^2/year)
TOTAL = sum([irradiation.irradiation(record=rec, location=place, horizon=None, t=tr, array_azimuth=az, model='p9') for rec in records])
#-- Divide it by 1000 to get the value in kWh/m^2/year
yearly_sum = TOTAL/1000.
#-- Yearly irradiation in kWh/m^2/year
return yearly_sum
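# --- Illustrative call (added; not part of the original script) ---
# A minimal sketch of invoking yearly_total_irr(). The coordinates are a rough
# Amsterdam location (to match STATION_CODE '062400'), az=180 means a
# south-facing surface and the 35-degree tilt is an arbitrary roof; the
# (lat, lon) tuple format for `place` is assumed to be what solpy expects.
if __name__ == '__main__':
    place = (52.37, 4.90)
    total = yearly_total_irr(place, az=180, tr=35)
    print('Estimated yearly irradiation: %.1f kWh/m^2' % total)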
|
{
"content_hash": "b1685bc2a75fdc94f1501b1c46d42719",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 144,
"avg_line_length": 43.532467532467535,
"alnum_prop": 0.5492243436754176,
"repo_name": "gijs/Solar3Dcity",
"id": "eda44a9ab7eb9ba88ed80dd18beeca6cc4ecffc8",
"size": "4617",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "irr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "53487"
}
],
"symlink_target": ""
}
|
from tempest.api.compute import base
from tempest import test
class ServerPasswordTestJSON(base.BaseV2ComputeTest):
@classmethod
def setUpClass(cls):
super(ServerPasswordTestJSON, cls).setUpClass()
cls.client = cls.servers_client
resp, cls.server = cls.create_test_server(wait_until="ACTIVE")
@test.attr(type='gate')
def test_get_server_password(self):
resp, body = self.client.get_password(self.server['id'])
self.assertEqual(200, resp.status)
@test.attr(type='gate')
def test_delete_server_password(self):
resp, body = self.client.delete_password(self.server['id'])
self.assertEqual(204, resp.status)
class ServerPasswordTestXML(ServerPasswordTestJSON):
_interface = 'xml'
|
{
"content_hash": "e5f9d3a8736af9c5269c76f58bd18123",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 70,
"avg_line_length": 30.64,
"alnum_prop": 0.6932114882506527,
"repo_name": "Mirantis/tempest",
"id": "50c881a26dd5de5c2a8b957a2de5113e6060f962",
"size": "1398",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tempest/api/compute/servers/test_server_password.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3297127"
},
{
"name": "Shell",
"bytes": "8663"
}
],
"symlink_target": ""
}
|
import sys
import pycurl
class Test:
def __init__(self):
self.contents = ''
def body_callback(self, buf):
self.contents = self.contents + buf
sys.stderr.write("Testing %s\n" % pycurl.version)
t = Test()
c = pycurl.Curl()
c.setopt(c.URL, 'http://curl.haxx.se/dev/')
c.setopt(c.WRITEFUNCTION, t.body_callback)
c.perform()
c.close()
print(t.contents)
|
{
"content_hash": "e5e52c912fa7541bcd5450a59378db1e",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 49,
"avg_line_length": 18.85,
"alnum_prop": 0.6472148541114059,
"repo_name": "cloudaice/simple-data",
"id": "44060af9de0bd98900a6b184a66a346d139ff6ed",
"size": "450",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "misc/virtenv/share/doc/pycurl/examples/basicfirst.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4842"
},
{
"name": "JavaScript",
"bytes": "6811"
},
{
"name": "Python",
"bytes": "1453381"
},
{
"name": "Shell",
"bytes": "4189"
}
],
"symlink_target": ""
}
|
import hashlib
import json
import os
import unicodedata
import uuid
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.dispatch import receiver
from django.template.defaultfilters import slugify
from django.utils.encoding import smart_str
import commonware
from jingo.helpers import urlparams
from uuidfield.fields import UUIDField
import mkt
from mkt.site.decorators import use_master
from mkt.site.helpers import absolutify
from mkt.site.models import ModelBase, OnChangeMixin
from mkt.site.storage_utils import (copy_stored_file, move_stored_file,
private_storage, public_storage)
from mkt.site.utils import smart_path
log = commonware.log.getLogger('z.files')
# Acceptable extensions.
EXTENSIONS = ('.webapp', '.json', '.zip')
class File(OnChangeMixin, ModelBase):
STATUS_CHOICES = mkt.STATUS_CHOICES.items()
version = models.ForeignKey('versions.Version', related_name='files')
filename = models.CharField(max_length=255, default='')
size = models.PositiveIntegerField(default=0) # In bytes.
hash = models.CharField(max_length=255, default='')
status = models.PositiveSmallIntegerField(choices=STATUS_CHOICES,
default=mkt.STATUS_PENDING,
db_index=True)
datestatuschanged = models.DateTimeField(null=True, auto_now_add=True)
reviewed = models.DateTimeField(null=True)
class Meta(ModelBase.Meta):
db_table = 'files'
index_together = (('datestatuschanged', 'version'),
('created', 'version'))
def __unicode__(self):
return unicode(self.id)
@property
def has_been_validated(self):
try:
self.validation
except FileValidation.DoesNotExist:
return False
else:
return True
def get_url_path(self, src):
url = os.path.join(reverse('downloads.file', args=[self.id]),
self.filename)
# Firefox's Add-on Manager needs absolute urls.
return absolutify(urlparams(url, src=src))
@classmethod
def from_upload(cls, upload, version, parse_data={}):
upload.path = smart_path(nfd_str(upload.path))
ext = os.path.splitext(upload.path)[1]
f = cls(version=version)
f.filename = f.generate_filename(extension=ext or '.zip')
f.size = private_storage.size(upload.path) # Size in bytes.
f.status = mkt.STATUS_PENDING
# Re-use the file-upload hash if we can, no need to regenerate a new
# one if we can avoid that.
f.hash = upload.hash or f.generate_hash(upload.path)
f.save()
log.debug('New file: %r from %r' % (f, upload))
# Move the uploaded file from the temp location.
copy_stored_file(
upload.path,
os.path.join(version.path_prefix, nfd_str(f.filename)),
src_storage=private_storage,
dst_storage=private_storage)
if upload.validation:
FileValidation.from_json(f, upload.validation)
return f
@property
def addon(self):
from mkt.versions.models import Version
from mkt.webapps.models import Webapp
version = Version.with_deleted.get(pk=self.version_id)
return Webapp.with_deleted.get(pk=version.addon_id)
def generate_hash(self, filename=None):
"""Generate a hash for a file."""
hash = hashlib.sha256()
with private_storage.open(filename or self.file_path, 'rb') as obj:
for chunk in iter(lambda: obj.read(1024), ''):
hash.update(chunk)
return 'sha256:%s' % hash.hexdigest()
def generate_filename(self, extension=None):
"""
Files are in the format of: {app_slug}-{version}.{extension}
"""
parts = []
addon = self.version.addon
# slugify drops unicode so we may end up with an empty string.
# Apache did not like serving unicode filenames (bug 626587).
extension = extension or '.zip' if addon.is_packaged else '.webapp'
# Apparently we have non-ascii slugs leaking into prod :(
# FIXME.
parts.append(slugify(addon.app_slug) or 'app')
parts.append(self.version.version)
self.filename = '-'.join(parts) + extension
return self.filename
@property
def file_path(self):
if self.status == mkt.STATUS_DISABLED:
return self.guarded_file_path
else:
return self.approved_file_path
@property
def approved_file_path(self):
return os.path.join(settings.ADDONS_PATH, str(self.version.addon_id),
self.filename)
@property
def guarded_file_path(self):
return os.path.join(settings.GUARDED_ADDONS_PATH,
str(self.version.addon_id), self.filename)
@property
def signed_file_path(self):
return os.path.join(settings.SIGNED_APPS_PATH,
str(self.version.addon_id), self._signed())
@property
def signed_reviewer_file_path(self):
return os.path.join(settings.SIGNED_APPS_REVIEWER_PATH,
str(self.version.addon_id), self._signed())
def _signed(self):
split = self.filename.rsplit('.', 1)
split.insert(-1, 'signed')
return '.'.join(split)
@property
def extension(self):
return os.path.splitext(self.filename)[-1]
@classmethod
def mv(cls, src, dst, msg, src_storage, dst_storage):
"""Move a file from src to dst."""
try:
if src_storage.exists(src):
log.info(msg % (src, dst))
move_stored_file(src, dst, src_storage=src_storage,
dst_storage=dst_storage)
except UnicodeEncodeError:
log.error('Move Failure: %s %s' % (smart_str(src), smart_str(dst)))
def hide_disabled_file(self):
"""Move a disabled file to the guarded file path."""
if not self.filename:
return
src, dst = self.approved_file_path, self.guarded_file_path
self.mv(src, dst, 'Moving disabled file: %s => %s',
src_storage=public_storage, dst_storage=private_storage)
def unhide_disabled_file(self):
"""Move a public file from the guarded file path."""
if not self.filename:
return
src, dst = self.guarded_file_path, self.approved_file_path
self.mv(src, dst, 'Moving undisabled file: %s => %s',
src_storage=private_storage, dst_storage=public_storage)
@use_master
def update_status(sender, instance, **kw):
if not kw.get('raw'):
try:
instance.version.addon.reload()
instance.version.addon.update_status()
if 'delete' in kw:
instance.version.addon.update_version(ignore=instance.version)
else:
instance.version.addon.update_version()
except models.ObjectDoesNotExist:
pass
def update_status_delete(sender, instance, **kw):
kw['delete'] = True
return update_status(sender, instance, **kw)
models.signals.post_save.connect(
update_status, sender=File, dispatch_uid='version_update_status')
models.signals.post_delete.connect(
update_status_delete, sender=File, dispatch_uid='version_update_status')
@receiver(models.signals.post_delete, sender=File,
dispatch_uid='cleanup_file')
def cleanup_file(sender, instance, **kw):
""" On delete of the file object from the database, unlink the file from
the file system """
if kw.get('raw') or not instance.filename:
return
# Use getattr so the paths are accessed inside the try block.
    # The property defined on File is `signed_reviewer_file_path`; a misspelled
    # name here would silently resolve to None through the getattr default below.
    for path in ('file_path', 'guarded_file_path', 'signed_reviewer_file_path',
                 'signed_file_path'):
try:
filename = getattr(instance, path, None)
except models.ObjectDoesNotExist:
return
if filename and (public_storage.exists(filename) or
private_storage.exists(filename)):
log.info('Removing filename: %s for file: %s'
% (filename, instance.pk))
public_storage.delete(filename)
private_storage.delete(filename)
@File.on_change
def check_file(old_attr, new_attr, instance, sender, **kw):
if kw.get('raw'):
return
old, new = old_attr.get('status'), instance.status
if new == mkt.STATUS_DISABLED and old != mkt.STATUS_DISABLED:
instance.hide_disabled_file()
elif old == mkt.STATUS_DISABLED and new != mkt.STATUS_DISABLED:
instance.unhide_disabled_file()
# Log that the hash has changed.
old, new = old_attr.get('hash'), instance.hash
if old != new:
try:
addon = instance.version.addon.pk
except models.ObjectDoesNotExist:
addon = 'unknown'
log.info('Hash changed for file: %s, addon: %s, from: %s to: %s' %
(instance.pk, addon, old, new))
class FileUpload(ModelBase):
"""Created when a file is uploaded for validation/submission."""
uuid = UUIDField(primary_key=True, auto=True)
path = models.CharField(max_length=255, default='')
name = models.CharField(max_length=255, default='',
help_text="The user's original filename")
hash = models.CharField(max_length=255, default='')
user = models.ForeignKey('users.UserProfile', null=True)
valid = models.BooleanField(default=False)
validation = models.TextField(null=True)
task_error = models.TextField(null=True)
class Meta(ModelBase.Meta):
db_table = 'file_uploads'
def __unicode__(self):
return self.uuid
def save(self, *args, **kw):
if self.validation:
try:
if json.loads(self.validation)['errors'] == 0:
self.valid = True
except Exception:
log.error('Invalid validation json: %r' % self)
super(FileUpload, self).save()
def add_file(self, chunks, filename, size):
filename = smart_str(filename)
loc = os.path.join(settings.ADDONS_PATH, 'temp', uuid.uuid4().hex)
base, ext = os.path.splitext(smart_path(filename))
if ext in EXTENSIONS:
loc += ext
log.info('UPLOAD: %r (%s bytes) to %r' % (filename, size, loc))
hash = hashlib.sha256()
# The buffer might have been read before, so rewind back at the start.
if hasattr(chunks, 'seek'):
chunks.seek(0)
with private_storage.open(loc, 'wb') as fd:
for chunk in chunks:
hash.update(chunk)
fd.write(chunk)
self.path = loc
self.name = filename
self.hash = 'sha256:%s' % hash.hexdigest()
self.save()
@classmethod
def from_post(cls, chunks, filename, size, **kwargs):
fu = FileUpload(**kwargs)
fu.add_file(chunks, filename, size)
return fu
@property
def processed(self):
return bool(self.valid or self.validation)
class FileValidation(ModelBase):
file = models.OneToOneField(File, related_name='validation')
valid = models.BooleanField(default=False)
errors = models.IntegerField(default=0)
warnings = models.IntegerField(default=0)
notices = models.IntegerField(default=0)
validation = models.TextField()
class Meta:
db_table = 'file_validation'
@classmethod
def from_json(cls, file, validation):
js = json.loads(validation)
new = cls(file=file, validation=validation, errors=js['errors'],
warnings=js['warnings'], notices=js['notices'])
new.valid = new.errors == 0
new.save()
return new
def nfd_str(u):
"""Uses NFD to normalize unicode strings."""
if isinstance(u, unicode):
return unicodedata.normalize('NFD', u).encode('utf-8')
return u
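# --- Illustrative behaviour (added; not part of the original module) ---
# nfd_str() decomposes accented characters before a value is used in a file
# path, so 'é' becomes 'e' plus a combining acute accent in the UTF-8 output,
# while byte strings pass through unchanged. Shown as comments because the
# module only runs inside the Django app.
#
#     nfd_str(u'caf\xe9.webapp')      # -> 'cafe\xcc\x81.webapp'
#     nfd_str('plain-bytes.webapp')   # -> 'plain-bytes.webapp'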
|
{
"content_hash": "783d17ab35de19ddd7a74fe2e96037bc",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 79,
"avg_line_length": 35.005780346820806,
"alnum_prop": 0.6115422721268163,
"repo_name": "washort/zamboni",
"id": "e88941458b5da8a631188951f71759b24a93689e",
"size": "12112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/files/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "354243"
},
{
"name": "HTML",
"bytes": "2383319"
},
{
"name": "JavaScript",
"bytes": "532109"
},
{
"name": "Makefile",
"bytes": "4313"
},
{
"name": "Python",
"bytes": "4735484"
},
{
"name": "Shell",
"bytes": "11135"
},
{
"name": "Smarty",
"bytes": "1159"
}
],
"symlink_target": ""
}
|
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name = "bluclobber",
version = "0.0.1",
author = "James Hetherington",
author_email = "j.hetherington@ucl.ac.uk",
description = ("Harness for cluster map/reduce analysis of ALTO books corpus"),
license = "BSD",
keywords = "digital humanities research books",
url = "http://development.rc.ucl.ac.uk/",
packages=['bluclobber'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 2 - Pre-Alpha",
"Topic :: Research :: Humanities",
"License :: OSI Approved :: BSD License",
],
entry_points={
'console_scripts': [
'bluclobber = bluclobber.harness.query:main',
'bluclobber_repartition = bluclobber.harness.repartition:main'
]
},
)
|
{
"content_hash": "e3fde5d774372b509be833184c711826",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 83,
"avg_line_length": 31.482758620689655,
"alnum_prop": 0.6111719605695509,
"repo_name": "UCL-dataspring/cluster-code",
"id": "9bb86454e0b1c81435c9f8d91443ab63b1a2345c",
"size": "913",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49104"
},
{
"name": "Shell",
"bytes": "1186"
}
],
"symlink_target": ""
}
|
courses = {
'feb2012': { 'cs101': {'name': 'Building a Search Engine',
'teacher': 'Dave',
'assistant': 'Peter C.'},
'cs373': {'name': 'Programming a Robotic Car',
'teacher': 'Sebastian',
'assistant': 'Andy'}},
'apr2012': { 'cs101': {'name': 'Building a Search Engine',
'teacher': 'Dave',
'assistant': 'Sarah'},
'cs212': {'name': 'The Design of Computer Programs',
'teacher': 'Peter N.',
'assistant': 'Andy',
'prereq': 'cs101'},
'cs253': {'name': 'Web Application Engineering - Building a Blog',
'teacher': 'Steve',
'prereq': 'cs101'},
'cs262': {'name': 'Programming Languages - Building a Web Browser',
'teacher': 'Wes',
'assistant': 'Peter C.',
'prereq': 'cs101'},
'cs373': {'name': 'Programming a Robotic Car',
'teacher': 'Sebastian'},
'cs387': {'name': 'Applied Cryptography',
'teacher': 'Dave'}},
'jan2044': { 'cs001': {'name': 'Building a Quantum Holodeck',
'teacher': 'Dorina'},
'cs003': {'name': 'Programming a Robotic Robotics Teacher',
'teacher': 'Jasper'},
}
}
# For the following questions, you will find the
# for <key> in <dictionary>:
# <block>
# construct useful. This loops through the key values in the Dictionary. For
# example, this procedure returns a list of all the courses offered in the given
# hexamester:
def courses_offered(courses, hexamester):
res = []
for c in courses[hexamester]:
res.append(c)
return res
# Define a procedure, when_offered(courses, course), that takes a courses data
# structure and a string representing a class, and returns a list of strings
# representing the hexamesters when the input course is offered.
def when_offered(courses,course):
hexa = []
for h in courses:
if course in courses[h]:
hexa.append(h)
return hexa
print when_offered(courses, 'cs101')
#>>> ['apr2012', 'feb2012']
print when_offered(courses, 'bio893')
#>>> []
|
{
"content_hash": "f2e617805068150be0838d048df5823e",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 84,
"avg_line_length": 39.98412698412698,
"alnum_prop": 0.48352520841603813,
"repo_name": "xala3pa/Computer-Science-cs101",
"id": "7a297d4ab2b9ff21f63062ec50935409e2e61e9c",
"size": "2849",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Lesson5/when_Offered.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "84588"
}
],
"symlink_target": ""
}
|
import pickle
from autobahn.websocket.protocol import WebSocketProtocol
class Case:
FAILED = "FAILED"
OK = "OK"
NON_STRICT = "NON-STRICT"
WRONG_CODE = "WRONG CODE"
UNCLEAN = "UNCLEAN"
FAILED_BY_CLIENT = "FAILED BY CLIENT"
INFORMATIONAL = "INFORMATIONAL"
UNIMPLEMENTED = "UNIMPLEMENTED"
# to remove
NO_CLOSE = "NO_CLOSE"
SUBCASES = []
def __init__(self, protocol):
self.p = protocol
self.received = []
self.expected = {}
self.expectedClose = {}
self.behavior = Case.FAILED
self.behaviorClose = Case.FAILED
self.result = "Actual events differ from any expected."
self.resultClose = "TCP connection was dropped without close handshake"
self.reportTime = False
self.reportCompressionRatio = False
self.trafficStats = None
self.subcase = None
self.suppressClose = False # suppresses automatic close behavior (used in cases that deliberately send bad close behavior)
## defaults for permessage-deflate - will be overridden in
## permessage-deflate test cases (but only for those)
##
self.perMessageDeflate = False
self.perMessageDeflateOffers = []
self.perMessageDeflateAccept = lambda connectionRequest, acceptNoContextTakeover, acceptMaxWindowBits, requestNoContextTakeover, requestMaxWindowBits: None
self.init()
def getSubcaseCount(self):
return len(Case.SUBCASES)
def setSubcase(self, subcase):
self.subcase = subcase
def init(self):
pass
def onOpen(self):
pass
def onMessage(self, msg, binary):
self.received.append(("message", msg, binary))
self.finishWhenDone()
def onPing(self, payload):
self.received.append(("ping", payload))
self.finishWhenDone()
def onPong(self, payload):
self.received.append(("pong", payload))
self.finishWhenDone()
def onClose(self, wasClean, code, reason):
pass
def compare(self, obj1, obj2):
return pickle.dumps(obj1) == pickle.dumps(obj2)
def onConnectionLost(self, failedByMe):
# check if we passed the test
for e in self.expected:
if self.compare(self.received, self.expected[e]):
self.behavior = e
self.passed = True
self.result = "Actual events match at least one expected."
break
if self.p.connectionWasOpen:
# check the close status
if self.expectedClose["closedByMe"] != self.p.closedByMe:
self.behaviorClose = Case.FAILED
self.resultClose = "The connection was failed by the wrong endpoint"
elif self.expectedClose["requireClean"] and not self.p.wasClean:
self.behaviorClose = Case.UNCLEAN
self.resultClose = "The spec requires the connection to be failed cleanly here"
elif self.p.remoteCloseCode != None and self.p.remoteCloseCode not in self.expectedClose["closeCode"]:
self.behaviorClose = Case.WRONG_CODE
self.resultClose = "The close code should have been %s or empty" % ','.join(map(str,self.expectedClose["closeCode"]))
elif not self.p.factory.isServer and self.p.droppedByMe:
self.behaviorClose = Case.FAILED_BY_CLIENT
self.resultClose = "It is preferred that the server close the TCP connection"
else:
self.behaviorClose = Case.OK
self.resultClose = "Connection was properly closed"
## for UTF8 tests, closing by wrong endpoint means case failure, since
## the peer then did not detect the invalid UTF8 at all
##
closedByWrongEndpointIsFatal = self.expectedClose.get("closedByWrongEndpointIsFatal", False)
if closedByWrongEndpointIsFatal and self.expectedClose["closedByMe"] != self.p.closedByMe:
self.behavior = Case.FAILED
else:
self.behaviorClose = Case.FAILED
self.resultClose = "The WebSocket opening handshake was never completed!"
def finishWhenDone(self):
# if we match at least one expected outcome check if we are supposed to
# start the closing handshake and if so, do it.
for e in self.expected:
if not self.compare(self.received, self.expected[e]):
return
if self.expectedClose["closedByMe"] and not self.suppressClose:
self.p.sendClose(self.expectedClose["closeCode"][0])
|
{
"content_hash": "e505033d54ddf5589a1aac24e33de5e5",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 161,
"avg_line_length": 36.80487804878049,
"alnum_prop": 0.643914292025624,
"repo_name": "tavendo/AutobahnTestSuite",
"id": "9bebae0b4c941a6cb6ea0c1ca31ca8cdd85d111e",
"size": "5325",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "autobahntestsuite/autobahntestsuite/case/case.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1585"
},
{
"name": "HTML",
"bytes": "315736"
},
{
"name": "JavaScript",
"bytes": "6503"
},
{
"name": "Makefile",
"bytes": "1973"
},
{
"name": "Python",
"bytes": "511372"
}
],
"symlink_target": ""
}
|
"""Tests for rpi_power config flow."""
from unittest.mock import MagicMock
from homeassistant.components.rpi_power.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import FlowResultType
from tests.common import patch
MODULE = "homeassistant.components.rpi_power.config_flow.new_under_voltage"
async def test_setup(hass: HomeAssistant) -> None:
"""Test setting up manually."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["type"] == FlowResultType.FORM
assert result["step_id"] == "confirm"
assert not result["errors"]
with patch(MODULE, return_value=MagicMock()):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == FlowResultType.CREATE_ENTRY
async def test_not_supported(hass: HomeAssistant) -> None:
"""Test setting up on not supported system."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
with patch(MODULE, return_value=None):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == FlowResultType.ABORT
assert result["reason"] == "no_devices_found"
async def test_onboarding(hass: HomeAssistant) -> None:
"""Test setting up via onboarding."""
with patch(MODULE, return_value=MagicMock()):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "onboarding"},
)
assert result["type"] == FlowResultType.CREATE_ENTRY
async def test_onboarding_not_supported(hass: HomeAssistant) -> None:
"""Test setting up via onboarding with unsupported system."""
with patch(MODULE, return_value=None):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "onboarding"},
)
assert result["type"] == FlowResultType.ABORT
assert result["reason"] == "no_devices_found"
|
{
"content_hash": "dc2b9275d5e4ef0118d05fba4bcd480d",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 86,
"avg_line_length": 35.8,
"alnum_prop": 0.681098696461825,
"repo_name": "nkgilley/home-assistant",
"id": "5c474fc0821e0b64a83a9c99dcde5daf388b650c",
"size": "2148",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "tests/components/rpi_power/test_config_flow.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "51597279"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
}
|
import json
import argparse
import logging
import glob
# Logging Information
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(levelname)s: %(message)s')
fh = logging.FileHandler('test_hashes.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(formatter)
logger.addHandler(ch)
parser = argparse.ArgumentParser()
parser.add_argument("hash_num", help="file that we want to verify")
args = parser.parse_args()
hashes = set()
hash_num = args.hash_num
logger.info("Verifying consistency for VirusShare_00" + str(hash_num).zfill(3))
logger.debug("Generating hashes from ../hashes/VirusShare_00" + str(hash_num).zfill(3) + ".md5")
with open(("../hashes/VirusShare_00" + str(hash_num).zfill(3) + ".md5"),'r') as file:
for line in file.readlines()[6:]:
hashes.add(line.strip())
for filename in glob.glob("../analyses/VirusShare_00" + str(hash_num).zfill(3) + ".*"):
logger.debug("Removing hashes from " + filename)
with open(filename,'r') as file:
for line in file.readlines():
hashes.remove(json.loads(line.strip())["md5"])
if len(hashes) == 0:
logger.info("VirusShare_00" + str(hash_num).zfill(3) + ".ldjson is consistent with hashfile")
else:
logger.error("VirusShare_00" + str(hash_num).zfill(3) + ".ldjson is inconsistent with hashfile")
|
{
"content_hash": "852043f70ac1cf1b06dbee298e52e334",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 100,
"avg_line_length": 32.81818181818182,
"alnum_prop": 0.7022160664819944,
"repo_name": "seymour1/label-virusshare",
"id": "7c297e239bcacdbda21309c331f3663fdf4cf595",
"size": "1444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_hashes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "4428"
},
{
"name": "Shell",
"bytes": "807"
}
],
"symlink_target": ""
}
|
import os
from datetime import datetime
from fabric.api import task, local, sudo, put
from fabric.contrib import files
from jinja2 import Environment, FileSystemLoader
from .constants import *
__all__ = ['deploy', 'undeploy', 'backup', 'tail', 'reset_log']
@task
def tail(grep=""):
sudo("tail -F -n +1 {} | grep --line-buffered -i '{}'"
.format(REMOTE_LOG_FILE, grep))
@task
def reset_log():
sudo("rm -f {}".format(REMOTE_LOG_FILE))
sudo("service gunicorn reload")
@task
def deploy():
_upload_archive()
_extract_archive()
_update_py_deps()
_ensure_log_dir()
_configure_gunicorn()
_configure_nginx()
@task
def undeploy():
sudo('rm -rf {}'.format(REMOTE_DEPLOY_DIR))
if files.exists(REMOTE_GUNICORN_CONF_FILE):
sudo('rm {}'.format(REMOTE_GUNICORN_CONF_FILE))
sudo("service gunicorn restart")
if files.exists(REMOTE_NGINX_CONF_FILE):
sudo('rm {}'.format(REMOTE_NGINX_CONF_FILE))
sudo('service nginx restart')
@task
def backup():
timestamp = datetime.now().strftime('%Y-%m-%d_%H%M')
dump_file = '%s-remote-%s.dmp' % (APP_NAME, timestamp)
pg_dump_cmd = 'pg_dump {} -U {} -h localhost -x -Fc -f {}' \
.format(APP_NAME, APP_NAME, dump_file)
sudo(pg_dump_cmd)
if not os.path.exists(LOCAL_BACKUPS_DIR):
local('mkdir {}'.format(LOCAL_BACKUPS_DIR))
files.get(dump_file, LOCAL_BACKUPS_DIR)
sudo("rm %s" % dump_file)
def _upload_archive():
outdir = 'dist/{}'.format(APP_NAME)
local('mkdir -p {}'.format(outdir))
local('cp requirements.txt {}'.format(outdir))
local('cp -R {} {}'.format(APP_NAME, outdir))
local('find {} -name "*.pyc" -type f -delete'.format(outdir))
local('tar czf {} {}'.format(LOCAL_ARCHIVE, outdir))
put(LOCAL_ARCHIVE, REMOTE_ARCHIVE, use_sudo=True)
local('rm -rf dist')
def _extract_archive():
if not files.exists(REMOTE_DEPLOY_DIR, use_sudo=True):
sudo('mkdir {}'.format(REMOTE_DEPLOY_DIR))
sudo('chown -R www-data:www-data {}'.format(REMOTE_DEPLOY_DIR))
sudo('chmod -R og-rwx,g+rxs {}'.format(REMOTE_DEPLOY_DIR))
sudo('rm -rf {}'.format(REMOTE_APP_DIR))
sudo('tar xmzf {} -C {} --strip-components=2'.format(REMOTE_ARCHIVE, REMOTE_DEPLOY_DIR))
sudo('rm {}'.format(REMOTE_ARCHIVE))
def _update_py_deps():
if not files.exists(REMOTE_VENV, use_sudo=True):
sudo('virtualenv {}'.format(REMOTE_VENV))
sudo('{}/bin/pip install -r {}/requirements.txt'.format(REMOTE_VENV, REMOTE_DEPLOY_DIR))
def _ensure_log_dir():
if not files.exists(REMOTE_LOG_DIR):
sudo('mkdir {}'.format(REMOTE_LOG_DIR))
sudo('chown -R www-data:www-data {}'.format(REMOTE_LOG_DIR))
sudo('chmod -R og-rwx,g+rxs {}'.format(REMOTE_LOG_DIR))
def _configure_gunicorn():
if not files.exists(REMOTE_GUNICORN_CONF_FILE):
files.upload_template(LOCAL_GUNICORN_CONF_FILE,
REMOTE_GUNICORN_CONF_FILE,
context={'app_name': APP_NAME},
template_dir=LOCAL_ETC_DIR,
use_jinja=True,
use_sudo=True)
sudo("service gunicorn restart")
def _configure_nginx():
if not files.exists(REMOTE_NGINX_CONF_FILE):
files.upload_template(LOCAL_NGINX_CONF_FILE,
REMOTE_NGINX_CONF_FILE,
context={
'app_name': APP_NAME,
'domain': DOMAIN,
'subdomain': SUBDOMAIN
},
template_dir=LOCAL_ETC_DIR,
use_jinja=True,
use_sudo=True)
sudo('service nginx reload')
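# --- Illustrative invocation (added; not part of the original fabfile) ---
# These tasks are meant to be driven from the Fabric 1.x CLI; the host below is
# a placeholder.
#
#     fab -H deploy@example.com deploy
#     fab -H deploy@example.com tail:grep=error
#     fab -H deploy@example.com backup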
|
{
"content_hash": "e299717b0c14c9f18adf585649466486",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 92,
"avg_line_length": 31.644628099173552,
"alnum_prop": 0.5682945938887438,
"repo_name": "johnwheeler/flask-live-starter",
"id": "752d1ec75d51d20fc12a0a4c17696365b6877e1a",
"size": "3829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fabfile/remote.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "550"
},
{
"name": "Python",
"bytes": "10642"
},
{
"name": "Smarty",
"bytes": "599"
}
],
"symlink_target": ""
}
|
"""
Second-Hand-Shop Project
@author: Malte Gerth
@copyright: Copyright (C) 2015 Malte Gerth
@license: MIT
@maintainer: Malte Gerth
@email: mail@malte-gerth.de
"""
from django.db import models
from django.utils.lru_cache import lru_cache
from django.utils.translation import ugettext_lazy as _
from secondhandshop_server.models import BaseModel
__author__ = "Malte Gerth <mail@malte-gerth.de>"
__copyright__ = "Copyright (C) 2015 Malte Gerth"
__license__ = "MIT"
@lru_cache(1)
def get_active_event():
return Event.objects.get_or_create(is_active=True)[0]
@lru_cache(1)
def get_active_event_id():
return Event.objects.get_or_create(is_active=True)[0].pk
def get_active_event_id_not_cached():
return Event.objects.get_or_create(is_active=True)[0].pk
class Event(BaseModel):
class Meta:
verbose_name = _("Event")
verbose_name_plural = _("Events")
db_table = "mb_shs_event"
ordering = ("booking_start_date",)
name = models.CharField(db_column="event_name", max_length=255)
is_active = models.BooleanField(_("Is active"), default=False, db_index=True)
booking_start_date = models.DateTimeField(db_column="reservation_start_date")
booking_end_date = models.DateTimeField(db_column="reservation_end_date")
max_sale_lists = models.IntegerField(default=180)
def __str__(self):
return self.name or super().__str__()
def save(self, *args, **kwargs):
super().save(*args, **kwargs)
get_active_event.cache_clear()
get_active_event_id.cache_clear()
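# --- Illustrative behaviour (added; not part of the original module) ---
# A sketch of the caching contract above: repeated calls reuse the lru_cache
# entry until save() clears it. Shown as comments because it needs a configured
# Django database.
#
#     event = get_active_event()        # hits the database (get_or_create)
#     event is get_active_event()       # True: served from the cache
#     event.save()                      # save() clears both caches
#     get_active_event()                # queries the database again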
|
{
"content_hash": "6e572fb9b2f9b8c8edeb1ba9b1b7a405",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 81,
"avg_line_length": 28.545454545454547,
"alnum_prop": 0.6745222929936305,
"repo_name": "JanMalte/secondhandshop_server",
"id": "da974278d6f303714bb59cf3ccfae490d7cc71e8",
"size": "1570",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/events/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2000"
},
{
"name": "HTML",
"bytes": "117043"
},
{
"name": "JavaScript",
"bytes": "11561"
},
{
"name": "Python",
"bytes": "229093"
}
],
"symlink_target": ""
}
|
import sys
import os
import shutil
import subprocess
import getopt
import crypt
import pwd
from tld import get_tld
######### CONFIGURATION ############
BASE_ROOT='/home'
START_USER_NUM=5001
BASE_USER_NAME='web'
PHP_FPM_TEMPLATE='/etc/php/7.0/fpm/pool.d/www.conf'
USER_PASSWORD='qwertyuioplkjhgfdsazxcvbnm'
####################################
############ FUNCTIONS #############
######### Do not edit below ########
def usage():
"""This function simply returns the usage"""
sys.stdout.write('Usage:\n')
sys.stdout.write('%s -a|--action=<action> [-d|--domain=<domain>] [-A|--alias=<alias>] [options]\n' % sys.argv[0])
sys.stdout.write('\nParameters:\n')
sys.stdout.write('\t-a|--action=ACTION\n\t\tit is mandatory\n')
sys.stdout.write('\t-d|--domain=domain.tld\n\t\tcan be used only with [add_domain, remove_domain, add_alias, get_certs, get_info]\n')
sys.stdout.write('\t-A|--alias=alias.domain.tld\n\t\tcan be used only with [add_alias, remove_alias, get_info]\n')
sys.stdout.write('\nActions:\n')
sys.stdout.write('\tadd_domain\tAdd a new domain\n')
sys.stdout.write('\tadd_alias\tAdd a new domain alias to an existent domain\n')
sys.stdout.write('\tremove_domain\tRemove an existent domain\n')
sys.stdout.write('\tremove_alias\tRemove an existent domain alias\n')
sys.stdout.write('\tget_certs\tObtain SSL certifiate and deploy it\n')
sys.stdout.write('\tget_info\tGet information of a domain or a domain alias (username)\n')
sys.stdout.write('\nOptions:\n')
sys.stdout.write('\t-f|--fakessl\tUse self signed certificate (only usable with [add_domain, add_alias])\n')
def valid_domain(domain):
"""This function return True if the passed domain is valid, false otherwise"""
try:
get_tld(domain,fix_protocol=True)
return True
except:
return False
def tld_and_sub(domain):
"""This function returns a dictionary with tld (top level domain) and
the related subdomain, www in case no subdomain is passed"""
tld = get_tld(domain,fix_protocol=True)
if domain==tld:
return {'tld':domain,'name':'www'}
index = domain.find(tld)
return {'tld':tld,'name':domain[0:(index-1)]}
def get_next_user():
"""This function returns a dictionary with the next available username and its uid"""
buf = []
with open('/etc/passwd','r') as f:
buf = f.readlines()
idx = str(START_USER_NUM)
while True:
user = BASE_USER_NAME+idx+':'
found = False
for line in buf:
if line.startswith(user):
found = True
break
if found == True:
idx = str(int(idx)+1)
else:
return {'username':user.strip(':'),'uid':int(idx)}
def add_new_user(username,uid,homedir):
"""This function adds a new system user with specified parameters"""
res = subprocess.run([
'useradd',
'--comment="WEB_USER_'+str(uid)+',,,"',
'--home-dir='+homedir,
'--no-log-init',
'--create-home',
'--shell=/bin/bash',
'--uid='+str(uid),
username], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Error adding user %s with uid %d: %s\n' % (username,uid,res.stderr))
sys.exit(1)
enc_password = crypt.crypt(USER_PASSWORD,crypt.mksalt(crypt.METHOD_SHA512))
res = subprocess.run([
'usermod',
'-p',
enc_password,
username], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Error setting password for user %s: %s\n' % (username,res.stderr))
sys.exit(1)
def remove_user(homedir):
"""This function removes the user which domain belongs to"""
buf = []
with open('/etc/passwd','r') as f:
buf = f.readlines()
username = ''
for line in buf:
if ':'+homedir+':' in line:
username = line.split(':')[0]
break
if username != '':
res = subprocess.run([
'userdel',
username], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Error removing user %s: %s\n' % (username,res.stderr))
sys.exit(1)
def remove_domain_folder(homedir):
"""This function removes the home directory of the domain"""
if os.path.isdir(homedir):
res = subprocess.run([
'rm',
'-rf',
homedir], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Error removing domain folder %s\n' % homedir)
sys.exit(1)
def lock_password(username):
"""This function lock the password for the user"""
res = subprocess.run([
'passwd',
'-l',
username], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
sys.stdout.write('Error locking password to user %s: %s\n' % (username,res.stderr))
sys.exit(1)
def create_subfolders(username,homedir):
"""This function creates subfolders of domain directory"""
dirname = os.path.join(homedir,'public_html')
if not os.path.isdir(dirname):
os.mkdir(dirname)
shutil.chown(dirname,username,username)
dirname = os.path.join(homedir,'tmp')
if not os.path.isdir(dirname):
os.mkdir(dirname)
shutil.chown(dirname,username,username)
dirname = os.path.join(homedir,'logs')
if not os.path.isdir(dirname):
os.mkdir(dirname)
shutil.chown(dirname,'root','root')
def create_php_pool(username, domain, homedir):
"""This function creates a php pool configuration file"""
if not os.path.isfile(PHP_FPM_TEMPLATE):
sys.stdout.write('No php fpm template found (%s)!\n' % PHP_FPM_TEMPLATE)
sys.exit(1)
filename = os.path.join('/etc/php/7.0/fpm/pool.d/',domain+'.conf')
if os.path.isfile(filename):
sys.stdout.write('PHP configuration file already exists: %s\n' % filename)
sys.exit(1)
lines = []
with open(PHP_FPM_TEMPLATE,'r') as f:
lines = f.readlines()
with open(filename,'w') as f:
for l in lines:
if l.startswith('user = www-data'):
f.write(l.replace('www-data',username))
continue
if l.startswith('group = www-data'):
f.write(l.replace('www-data',username))
continue
if l.startswith('[www]'):
f.write(l.replace('www',domain))
continue
if l.startswith('listen = '):
f.write('listen = /var/run/php/php7.0-fpm_'+domain+'.sock\n')
continue
if l.startswith(';env[TMP]'):
f.write('env[TMP] = '+os.path.join(homedir,'tmp')+'\n')
continue
if l.startswith(';env[TMPDIR]'):
f.write('env[TMPDIR] = '+os.path.join(homedir,'tmp')+'\n')
continue
if l.startswith(';env[TEMP]'):
f.write('env[TEMP] = '+os.path.join(homedir,'tmp')+'\n')
continue
f.write(l)
def remove_php_pool(domain):
"""This function removes the php pool of the domain"""
filename = '/etc/php/7.0/fpm/pool.d/'+domain+'.conf'
if os.path.isfile(filename):
os.unlink(filename)
def domains_in_virtualhost(domain):
"""This function returns the list of domains configured in the virtualhost"""
buf = []
with open('/etc/nginx/sites-available/'+domain,'r') as f:
buf = f.readlines()
domains = []
for line in buf:
if ' server_name ' in line:
domains = line.strip().strip(';').split()[1:]
break
return domains
def check_update_ssl_certs(domains):
"""This function get ssl certificates for all domains in virtualhost and adjust it"""
if len(domains)==0:
sys.stdout.write('No domain provided to certbot!\n')
return
domains_list = []
for d in domains:
domains_list.append('-d')
domains_list.append(d.strip())
res = subprocess.run([
'certbot',
'certonly',
'--keep-until-expiring',
'--expand',
'--webroot',
'--webroot-path',
'/var/www/html']+domains_list, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if not os.path.islink('/etc/letsencrypt/live/'+domains[0].strip()+'/fullchain.pem'):
sys.stdout.write('Missing SSL certificate %s\n' % '/etc/letsencrypt/live/'+domains[0].strip()+'/fullchain.pem')
sys.stdout.write('Look at %s for more information about\n' % '/var/log/letsencrypt/letsencrypt.log')
return
buf = []
with open('/etc/letsencrypt/renewal/'+domains[0].strip()+'.conf','r') as f:
buf = f.readlines()
    for d in domains:
        # Reset the flag for each domain; without this the very first miss would
        # raise a NameError and a single earlier match would mask later misses.
        found = False
        for line in buf:
            if line.startswith(d.strip()+' ='):
                found = True
                break
if not found:
with open('/etc/letsencrypt/renewal/'+d.strip()+'.conf','a') as f:
f.write(d.strip()+' = /var/www/html\n')
domain_parts = tld_and_sub(domains[0].strip())
buf = []
with open('/etc/nginx/sites-available/'+domain_parts['name']+'.'+domain_parts['tld'],'r') as f:
buf = f.readlines()
with open('/etc/nginx/sites-available/'+domain_parts['name']+'.'+domain_parts['tld'],'w') as f:
for line in buf:
if 'ssl_certificate ' in line:
f.write(' ssl_certificate /etc/letsencrypt/live/'+domains[0].strip()+'/fullchain.pem;\n')
continue
if 'ssl_certificate_key ' in line:
f.write(' ssl_certificate_key /etc/letsencrypt/live/'+domains[0].strip()+'/privkey.pem;\n')
continue
f.write(line)
def remove_ssl_certs(domain):
"""This function removes all SSL certificates of a domain"""
if os.path.isdir('/etc/letsencrypt/live/'+domain):
shutil.rmtree('/etc/letsencrypt/live/'+domain)
if os.path.isdir('/etc/letsencrypt/archive/'+domain):
shutil.rmtree('/etc/letsencrypt/archive/'+domain)
if os.path.isfile('/etc/letsencrypt/renewal/'+domain+'.conf'):
os.unlink('/etc/letsencrypt/renewal/'+domain+'.conf')
def create_nginx_virtualhost(domain,homedir):
"""This function creates the NGINX virtualhost"""
filename = '/etc/nginx/sites-available/'+domain
dst_filename = '/etc/nginx/sites-enabled/'+domain
if os.path.isfile(filename):
sys.stdout.write('Virtualhost configuration already exists: %s\n' % filename)
sys.exit(1)
domain_parts = tld_and_sub(domain)
with open(filename,'w') as f:
f.write('server {\n')
f.write(' listen 80;\n')
if domain_parts['name'] == 'www':
f.write(' server_name '+domain_parts['tld']+' '+domain_parts['name']+'.'+domain_parts['tld']+';\n');
else:
f.write(' server_name '+domain_parts['name']+'.'+domain_parts['tld']+';\n')
f.write(' return 301 https://'+domain_parts['name']+'.'+domain_parts['tld']+'$request_uri;\n')
f.write('}\n')
f.write('server {\n')
f.write(' server_name '+domain_parts['name']+'.'+domain_parts['tld']+';\n')
f.write(' listen 443 ssl http2;\n')
f.write(' access_log '+os.path.join(homedir,'logs','nginx.access.log')+';\n')
f.write(' error_log '+os.path.join(homedir,'logs','nginx.error.log')+';\n')
f.write(' root '+os.path.join(homedir,'public_html')+';\n')
f.write(' set $php_sock_name '+domain_parts['name']+'.'+domain_parts['tld']+';\n')
f.write(' include /etc/nginx/global/common.conf;\n')
f.write(' include /etc/nginx/global/wordpress.conf;\n')
f.write(' ssl_certificate /etc/nginx/certs/server.crt;\n')
f.write(' ssl_certificate_key /etc/nginx/certs/server.key;\n')
f.write(' include /etc/nginx/global/ssl.conf;\n')
f.write('}\n')
os.symlink(filename,dst_filename)
def remove_nginx_virtualhost(domain):
"""This function removes nginx virtualhost of a domain"""
if os.path.islink('/etc/nginx/sites-enabled/'+domain):
os.unlink('/etc/nginx/sites-enabled/'+domain)
if os.path.isfile('/etc/nginx/sites-available/'+domain):
os.unlink('/etc/nginx/sites-available/'+domain)
def reload_services():
"""This function reloads configurations of PHP-FPM and NGINX services"""
res = subprocess.run([
'/etc/init.d/php7.0-fpm',
'reload'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
        sys.stdout.write('Unable to reload PHP: %s\n' % res.stderr)
sys.exit(1)
res = subprocess.run([
'/usr/sbin/nginx',
'-s',
'reload'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if res.stderr != b'':
        sys.stdout.write('Unable to reload NGINX: %s\n' % res.stderr)
sys.exit(1)
def create_symlink(alias_domain_dir,domain_dir):
"""This function creates symlink for the alias domain"""
os.symlink(domain_dir,alias_domain_dir)
def remove_symlink(alias_domain_dir):
"""This function removes symlink for the alias domain"""
os.unlink(alias_domain_dir)
def add_nginx_virtualhost_alias(domain, alias_domain):
"""This function adds a new alias to NGINX virtualhost"""
buf = []
with open('/etc/nginx/sites-available/'+domain,'r') as f:
buf = f.readlines()
with open('/etc/nginx/sites-available/'+domain,'w') as f:
for line in buf:
if ' server_name ' in line:
chunks = line.strip().strip(';').split()[1:]
if alias_domain not in chunks:
chunks.append(alias_domain)
line = ' server_name '+' '.join(chunks)+';\n'
f.write(line)
def remove_nginx_virtualhost_alias(domain, alias_domain):
"""This function removes an alias from NGINX virtualhost"""
buf = []
with open('/etc/nginx/sites-available/'+domain,'r') as f:
buf = f.readlines()
with open('/etc/nginx/sites-available/'+domain,'w') as f:
for line in buf:
if ' server_name ' in line:
chunks = line.strip().strip(';').split()[1:]
if alias_domain in chunks:
chunks.remove(alias_domain)
line = ' server_name '+' '.join(chunks)+';\n'
f.write(line)
def get_alias_parent(alias_domain_dir):
"""This function returns the parent domain of an alias domain"""
domain_dir = os.readlink(alias_domain_dir)
domain = os.path.basename(domain_dir)
return domain
def remove_alias_ssl_certs(domain, alias_domain):
"""This function removes the alias_domain from the letsencrypt renew process"""
buf = []
with open('/etc/letsencrypt/renewal/'+domain+'.conf', 'r') as f:
buf = f.readlines()
with open('/etc/letsencrypt/renewal/'+domain+'.conf', 'w') as f:
for line in buf:
if line.startswith(alias_domain+' ='):
continue
f.write(line)
####################################
######### MAIN STARTS HERE #########
def main():
if os.getuid() != 0:
sys.stdout.write('This program must be executed as root\n')
sys.exit(1)
try:
opts, args = getopt.getopt(sys.argv[1:], "ha:d:A:f", ["help", "action=", "domain=", "alias=", "fakessl"])
except getopt.GetoptError as err:
usage()
sys.exit(2)
domain = None
alias_domain = None
action = None
ssl_fake = False
show_info = False
if len(opts) == 0:
usage()
sys.exit(2)
for o, a in opts:
if o in ("-h", "--help"):
usage()
sys.exit()
elif o in ("-a", "--action"):
action = a
if action not in ('add_domain','add_alias','remove_domain','remove_alias','get_certs','get_info'):
sys.stdout.write("Unknown action %s\n" % action)
usage()
sys.exit(1)
elif o in ("-d", "--domain"):
domain = a
elif o in ("-A", "--alias"):
alias_domain = a
elif o in ("-f", "--fakessl"):
ssl_fake = True
else:
sys.stdout.write('Unknown option %s\n' % o)
usage()
sys.exit(1)
if action == 'get_info':
if domain == None and alias_domain == None:
sys.stdout.write('Missing domain or alias domain\n')
sys.exit(1)
if domain != None and alias_domain != None:
sys.stdout.write('Please specify only a domain or an alias domain\n')
sys.exit(1)
# check if domain already exists
if domain != None:
domain_parts = tld_and_sub(domain)
base_domain_dir = os.path.join(BASE_ROOT,domain_parts['tld'])
child_domain_dir = os.path.join(base_domain_dir,domain_parts['name']+'.'+domain_parts['tld'])
domain = domain_parts['name']+'.'+domain_parts['tld']
if not os.path.isdir(child_domain_dir):
sys.stdout.write('Domain %s does not exist at %s\n' % (domain,child_domain_dir))
sys.exit(1)
# check if alias domain already exists
if alias_domain != None:
alias_domain_parts = tld_and_sub(alias_domain)
base_alias_domain_dir = os.path.join(BASE_ROOT,alias_domain_parts['tld'])
child_alias_domain_dir = os.path.join(base_alias_domain_dir,alias_domain_parts['name']+'.'+alias_domain_parts['tld'])
alias_domain = alias_domain_parts['name']+'.'+alias_domain_parts['tld']
if not (os.path.isdir(child_alias_domain_dir) or os.path.islink(child_alias_domain_dir)):
sys.stdout.write('Alias domain %s does not exist at %s\n' % (alias_domain,child_alias_domain_dir))
sys.exit(1)
if domain != None:
sys.stdout.write(pwd.getpwuid(os.stat(child_domain_dir).st_uid).pw_name+'\n')
sys.exit(0)
elif alias_domain != None:
sys.stdout.write(pwd.getpwuid(os.stat(child_alias_domain_dir).st_uid).pw_name+'\n')
sys.exit(0)
elif action == 'add_domain':
if domain == None:
sys.stdout.write('Missing domain\n')
sys.exit(1)
# check if domain already exists
domain_parts = tld_and_sub(domain)
base_domain_dir = os.path.join(BASE_ROOT,domain_parts['tld'])
child_domain_dir = os.path.join(base_domain_dir,domain_parts['name']+'.'+domain_parts['tld'])
domain = domain_parts['name']+'.'+domain_parts['tld']
if os.path.isdir(child_domain_dir):
sys.stdout.write('Domain %s already exists at %s\n' % (domain,child_domain_dir))
sys.exit(1)
# add new user
if not os.path.isdir(base_domain_dir):
os.mkdir(base_domain_dir)
user = get_next_user()
add_new_user(user['username'],user['uid'],child_domain_dir)
# lock user password
#lock_password(user['username'])
# create additional folders
create_subfolders(user['username'],child_domain_dir)
# create PHP pool
create_php_pool(user['username'],domain,child_domain_dir)
# create NGINX virtualhost
create_nginx_virtualhost(domain,child_domain_dir)
# obtain SSL certificates from letsencrypt
if not ssl_fake:
domains = domains_in_virtualhost(domain)
check_update_ssl_certs(domains)
# reload services (nginx + php-fpm)
reload_services()
elif action == 'add_alias':
if domain == None:
sys.stdout.write('Missing domain\n')
sys.exit(1)
if alias_domain == None:
sys.stdout.write('Missing domain alias\n')
sys.exit(1)
# check if domain already exists
domain_parts = tld_and_sub(domain)
base_domain_dir = os.path.join(BASE_ROOT,domain_parts['tld'])
child_domain_dir = os.path.join(base_domain_dir,domain_parts['name']+'.'+domain_parts['tld'])
domain = domain_parts['name']+'.'+domain_parts['tld']
if not os.path.isdir(child_domain_dir):
sys.stdout.write('Domain %s does not exist at %s\n' % (domain,child_domain_dir))
sys.exit(1)
# check if alias domain already exists
alias_domain_parts = tld_and_sub(alias_domain)
base_alias_domain_dir = os.path.join(BASE_ROOT,alias_domain_parts['tld'])
child_alias_domain_dir = os.path.join(base_alias_domain_dir,alias_domain_parts['name']+'.'+alias_domain_parts['tld'])
alias_domain = alias_domain_parts['name']+'.'+alias_domain_parts['tld']
if os.path.isdir(child_alias_domain_dir) or os.path.islink(child_alias_domain_dir):
sys.stdout.write('Alias domain %s already exists at %s\n' % (alias_domain,child_alias_domain_dir))
sys.exit(1)
# add base folder if not exists
if not os.path.isdir(base_domain_dir):
os.mkdir(base_domain_dir)
# create symlink
create_symlink(child_alias_domain_dir,child_domain_dir)
# add NGINX virtualhost alias
add_nginx_virtualhost_alias(domain, alias_domain)
# obtain SSL certificates from letsencrypt
if not ssl_fake:
domains = domains_in_virtualhost(domain)
check_update_ssl_certs(domains)
# reload services (nginx + php-fpm)
reload_services()
elif action == 'remove_domain':
if domain == None:
sys.stdout.write('Missing domain\n')
sys.exit(1)
# check if domain already exists
domain_parts = tld_and_sub(domain)
base_domain_dir = os.path.join(BASE_ROOT,domain_parts['tld'])
child_domain_dir = os.path.join(base_domain_dir,domain_parts['name']+'.'+domain_parts['tld'])
domain = domain_parts['name']+'.'+domain_parts['tld']
if not os.path.isdir(child_domain_dir):
sys.stdout.write('Domain %s does not exist at %s\n' % (domain,child_domain_dir))
sys.exit(1)
# remove php pool
remove_php_pool(domain)
# remove ssl certificates
remove_ssl_certs(domain)
# remove nginx virtualhost
remove_nginx_virtualhost(domain)
# reload services (nginx + php-fpm)
reload_services()
# remove domain folder
remove_domain_folder(child_domain_dir)
# remove user if present
remove_user(child_domain_dir)
elif action == 'remove_alias':
if alias_domain == None:
sys.stdout.write('Missing domain alias\n')
sys.exit(1)
# check if alias domain already exists
alias_domain_parts = tld_and_sub(alias_domain)
base_alias_domain_dir = os.path.join(BASE_ROOT,alias_domain_parts['tld'])
child_alias_domain_dir = os.path.join(base_alias_domain_dir,alias_domain_parts['name']+'.'+alias_domain_parts['tld'])
alias_domain = alias_domain_parts['name']+'.'+alias_domain_parts['tld']
if not os.path.islink(child_alias_domain_dir):
sys.stdout.write('Alias domain %s does not exist at %s\n' % (alias_domain,child_alias_domain_dir))
sys.exit(1)
# get alias parent
domain = get_alias_parent(child_alias_domain_dir)
# remove domain folder
remove_symlink(child_alias_domain_dir)
# remove ssl certificates
remove_alias_ssl_certs(domain, alias_domain)
# remove nginx virtualhost
remove_nginx_virtualhost_alias(domain, alias_domain)
# reload services (nginx + php-fpm)
reload_services()
elif action == 'get_certs':
if domain is None:
sys.stdout.write('Missing domain\n')
sys.exit(1)
# check if domain already exists
domain_parts = tld_and_sub(domain)
base_domain_dir = os.path.join(BASE_ROOT,domain_parts['tld'])
child_domain_dir = os.path.join(base_domain_dir,domain_parts['name']+'.'+domain_parts['tld'])
domain = domain_parts['name']+'.'+domain_parts['tld']
if not os.path.isdir(child_domain_dir):
sys.stdout.write('Domain %s does not exist at %s\n' % (domain,child_domain_dir))
sys.exit(1)
domains = domains_in_virtualhost(domain)
check_update_ssl_certs(domains)
# reload services (nginx + php-fpm)
reload_services()
if __name__ == "__main__":
main()
|
{
"content_hash": "7d7e3bb24ab0803413de6f3efb8dc68e",
"timestamp": "",
"source": "github",
"line_count": 612,
"max_line_length": 137,
"avg_line_length": 40.1062091503268,
"alnum_prop": 0.5849256467712365,
"repo_name": "matteomattei/servermaintenance",
"id": "e8d5c7372342281cf873b6f603544a2589d902b0",
"size": "24644",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Debian9/lemp_manager.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "74288"
},
{
"name": "Shell",
"bytes": "95074"
}
],
"symlink_target": ""
}
|
"""
Linear Discriminant Analysis and Quadratic Discriminant Analysis
"""
# Authors: Clemens Brunner
# Martin Billinger
# Matthieu Perrot
# Mathieu Blondel
# License: BSD 3-Clause
import warnings
import numpy as np
from scipy import linalg
from scipy.special import expit
from .base import BaseEstimator, TransformerMixin, ClassifierMixin
from .linear_model._base import LinearClassifierMixin
from .covariance import ledoit_wolf, empirical_covariance, shrunk_covariance
from .utils.multiclass import unique_labels
from .utils.validation import check_is_fitted
from .utils.multiclass import check_classification_targets
from .utils.extmath import softmax
from .preprocessing import StandardScaler
__all__ = ['LinearDiscriminantAnalysis', 'QuadraticDiscriminantAnalysis']
def _cov(X, shrinkage=None, covariance_estimator=None):
"""Estimate covariance matrix (using optional covariance_estimator).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
shrinkage : {'empirical', 'auto'} or float, default=None
Shrinkage parameter, possible values:
- None or 'empirical': no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Shrinkage parameter is ignored if `covariance_estimator`
is not None.
covariance_estimator : estimator, default=None
If not None, `covariance_estimator` is used to estimate
the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
like the estimators in :mod:`sklearn.covariance`.
If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Returns
-------
s : ndarray of shape (n_features, n_features)
Estimated covariance matrix.
"""
if covariance_estimator is None:
shrinkage = "empirical" if shrinkage is None else shrinkage
if isinstance(shrinkage, str):
if shrinkage == 'auto':
sc = StandardScaler() # standardize features
X = sc.fit_transform(X)
s = ledoit_wolf(X)[0]
# rescale
s = sc.scale_[:, np.newaxis] * s * sc.scale_[np.newaxis, :]
elif shrinkage == 'empirical':
s = empirical_covariance(X)
else:
raise ValueError('unknown shrinkage parameter')
elif isinstance(shrinkage, float) or isinstance(shrinkage, int):
if shrinkage < 0 or shrinkage > 1:
raise ValueError('shrinkage parameter must be between 0 and 1')
s = shrunk_covariance(empirical_covariance(X), shrinkage)
else:
raise TypeError('shrinkage must be a float or a string')
else:
if shrinkage is not None and shrinkage != 0:
raise ValueError("covariance_estimator and shrinkage parameters "
"are not None. Only one of the two can be set.")
covariance_estimator.fit(X)
if not hasattr(covariance_estimator, 'covariance_'):
raise ValueError("%s does not have a covariance_ attribute" %
covariance_estimator.__class__.__name__)
s = covariance_estimator.covariance_
return s
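# A minimal usage sketch (added for illustration, not part of the original
# module); the random data below is an assumed stand-in:
#
#   >>> import numpy as np
#   >>> rng = np.random.RandomState(0)
#   >>> X = rng.randn(50, 3)
#   >>> _cov(X).shape                    # empirical covariance
#   (3, 3)
#   >>> _cov(X, shrinkage='auto').shape  # Ledoit-Wolf shrinkage
#   (3, 3)
#   >>> _cov(X, shrinkage=0.2).shape     # fixed shrinkage amount
#   (3, 3)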
def _class_means(X, y):
"""Compute class means.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
Returns
-------
means : array-like of shape (n_classes, n_features)
Class means.
"""
classes, y = np.unique(y, return_inverse=True)
cnt = np.bincount(y)
means = np.zeros(shape=(len(classes), X.shape[1]))
np.add.at(means, y, X)
means /= cnt[:, None]
return means
def _class_cov(X, y, priors, shrinkage=None, covariance_estimator=None):
"""Compute weighted within-class covariance matrix.
The per-class covariances are weighted by the class priors.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
priors : array-like of shape (n_classes,)
Class priors.
shrinkage : 'auto' or float, default=None
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Shrinkage parameter is ignored if `covariance_estimator` is not None.
covariance_estimator : estimator, default=None
If not None, `covariance_estimator` is used to estimate
the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
like the estimators in sklearn.covariance.
If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Returns
-------
cov : array-like of shape (n_features, n_features)
Weighted within-class covariance matrix
"""
classes = np.unique(y)
cov = np.zeros(shape=(X.shape[1], X.shape[1]))
for idx, group in enumerate(classes):
Xg = X[y == group, :]
cov += priors[idx] * np.atleast_2d(
_cov(Xg, shrinkage, covariance_estimator))
return cov
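# Note added for clarity: the loop above implements the weighted within-class
# covariance Sigma_w = sum_k prior_k * Sigma_k, where Sigma_k is the
# (optionally shrunk) covariance of the samples in class k.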
class LinearDiscriminantAnalysis(LinearClassifierMixin,
TransformerMixin,
BaseEstimator):
"""Linear Discriminant Analysis
A classifier with a linear decision boundary, generated by fitting class
conditional densities to the data and using Bayes' rule.
The model fits a Gaussian density to each class, assuming that all classes
share the same covariance matrix.
The fitted model can also be used to reduce the dimensionality of the input
by projecting it to the most discriminative directions, using the
`transform` method.
.. versionadded:: 0.17
*LinearDiscriminantAnalysis*.
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
solver : {'svd', 'lsqr', 'eigen'}, default='svd'
Solver to use, possible values:
- 'svd': Singular value decomposition (default).
Does not compute the covariance matrix, therefore this solver is
recommended for data with a large number of features.
- 'lsqr': Least squares solution.
Can be combined with shrinkage or custom covariance estimator.
- 'eigen': Eigenvalue decomposition.
Can be combined with shrinkage or custom covariance estimator.
shrinkage : 'auto' or float, default=None
Shrinkage parameter, possible values:
- None: no shrinkage (default).
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
This should be left to None if `covariance_estimator` is used.
Note that shrinkage works only with 'lsqr' and 'eigen' solvers.
priors : array-like of shape (n_classes,), default=None
The class prior probabilities. By default, the class proportions are
inferred from the training data.
n_components : int, default=None
Number of components (<= min(n_classes - 1, n_features)) for
dimensionality reduction. If None, will be set to
min(n_classes - 1, n_features). This parameter only affects the
`transform` method.
store_covariance : bool, default=False
If True, explicitly compute the weighted within-class covariance
matrix when solver is 'svd'. The matrix is always computed
and stored for the other solvers.
.. versionadded:: 0.17
tol : float, default=1.0e-4
Absolute threshold for a singular value of X to be considered
significant, used to estimate the rank of X. Dimensions whose
singular values are non-significant are discarded. Only used if
solver is 'svd'.
.. versionadded:: 0.17
covariance_estimator : covariance estimator, default=None
If not None, `covariance_estimator` is used to estimate
the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
like the estimators in :mod:`sklearn.covariance`.
If None, the shrinkage parameter drives the estimate.
This should be left to None if `shrinkage` is used.
Note that `covariance_estimator` works only with 'lsqr' and 'eigen'
solvers.
.. versionadded:: 0.24
Attributes
----------
coef_ : ndarray of shape (n_features,) or (n_classes, n_features)
Weight vector(s).
intercept_ : ndarray of shape (n_classes,)
Intercept term.
covariance_ : array-like of shape (n_features, n_features)
Weighted within-class covariance matrix. It corresponds to
`sum_k prior_k * C_k` where `C_k` is the covariance matrix of the
samples in class `k`. The `C_k` are estimated using the (potentially
shrunk) biased estimator of covariance. If solver is 'svd', only
exists when `store_covariance` is True.
explained_variance_ratio_ : ndarray of shape (n_components,)
Percentage of variance explained by each of the selected components.
If ``n_components`` is not set then all components are stored and the
sum of explained variances is equal to 1.0. Only available when eigen
or svd solver is used.
means_ : array-like of shape (n_classes, n_features)
Class-wise means.
priors_ : array-like of shape (n_classes,)
Class priors (sum to 1).
scalings_ : array-like of shape (rank, n_classes - 1)
Scaling of the features in the space spanned by the class centroids.
Only available for 'svd' and 'eigen' solvers.
xbar_ : array-like of shape (n_features,)
Overall mean. Only present if solver is 'svd'.
classes_ : array-like of shape (n_classes,)
Unique class labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
See Also
--------
QuadraticDiscriminantAnalysis : Quadratic Discriminant Analysis.
Examples
--------
>>> import numpy as np
>>> from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = LinearDiscriminantAnalysis()
>>> clf.fit(X, y)
LinearDiscriminantAnalysis()
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(self, solver='svd', shrinkage=None, priors=None,
n_components=None, store_covariance=False, tol=1e-4,
covariance_estimator=None):
self.solver = solver
self.shrinkage = shrinkage
self.priors = priors
self.n_components = n_components
self.store_covariance = store_covariance # used only in svd solver
self.tol = tol # used only in svd solver
self.covariance_estimator = covariance_estimator
def _solve_lsqr(self, X, y, shrinkage, covariance_estimator):
"""Least squares solver.
The least squares solver computes a straightforward solution of the
optimal decision rule based directly on the discriminant functions. It
can only be used for classification (with any covariance estimator),
because estimation of eigenvectors is not performed. Therefore, dimensionality
reduction with the transform is not supported.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_classes)
Target values.
shrinkage : 'auto', float or None
Shrinkage parameter, possible values:
- None: no shrinkage.
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage parameter.
Shrinkage parameter is ignored if `covariance_estimator`
is not None.
covariance_estimator : estimator, default=None
If not None, `covariance_estimator` is used to estimate
the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
like the estimators in sklearn.covariance.
If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Notes
-----
This solver is based on [1]_, section 2.6.2, pp. 39-41.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage,
covariance_estimator)
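# Comment added for clarity: lstsq solves Sigma_w @ coef.T = means.T, so each
# row of coef_ is Sigma_w^{-1} mu_k; the intercept below is
# -0.5 * mu_k^T Sigma_w^{-1} mu_k + log(prior_k), i.e. the Gaussian
# discriminant function under a shared covariance matrix.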
self.coef_ = linalg.lstsq(self.covariance_, self.means_.T)[0].T
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_eigen(self, X, y, shrinkage,
covariance_estimator):
"""Eigenvalue solver.
The eigenvalue solver computes the optimal solution of the Rayleigh
coefficient (basically the ratio of between class scatter to within
class scatter). This solver supports both classification and
dimensionality reduction (with any covariance estimator).
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
shrinkage : 'auto', float or None
Shrinkage parameter, possible values:
- None: no shrinkage.
- 'auto': automatic shrinkage using the Ledoit-Wolf lemma.
- float between 0 and 1: fixed shrinkage constant.
Shrinkage parameter is ignored if `covariance_estimator`
is not None.
covariance_estimator : estimator, default=None
If not None, `covariance_estimator` is used to estimate
the covariance matrices instead of relying on the empirical
covariance estimator (with potential shrinkage).
The object should have a fit method and a ``covariance_`` attribute
like the estimators in sklearn.covariance.
If None, the shrinkage parameter drives the estimate.
.. versionadded:: 0.24
Notes
-----
This solver is based on [1]_, section 3.8.3, pp. 121-124.
References
----------
.. [1] R. O. Duda, P. E. Hart, D. G. Stork. Pattern Classification
(Second Edition). John Wiley & Sons, Inc., New York, 2001. ISBN
0-471-05669-3.
"""
self.means_ = _class_means(X, y)
self.covariance_ = _class_cov(X, y, self.priors_, shrinkage,
covariance_estimator)
Sw = self.covariance_ # within scatter
St = _cov(X, shrinkage, covariance_estimator) # total scatter
Sb = St - Sw # between scatter
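# Comment added for clarity: eigh solves the generalized eigenproblem
# Sb v = lambda * Sw v; eigenvectors with the largest eigenvalues maximize
# the Rayleigh quotient (between-class over within-class scatter).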
evals, evecs = linalg.eigh(Sb, Sw)
self.explained_variance_ratio_ = np.sort(evals / np.sum(evals)
)[::-1][:self._max_components]
evecs = evecs[:, np.argsort(evals)[::-1]] # sort eigenvectors
self.scalings_ = evecs
self.coef_ = np.dot(self.means_, evecs).dot(evecs.T)
self.intercept_ = (-0.5 * np.diag(np.dot(self.means_, self.coef_.T)) +
np.log(self.priors_))
def _solve_svd(self, X, y):
"""SVD solver.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,) or (n_samples, n_targets)
Target values.
"""
n_samples, n_features = X.shape
n_classes = len(self.classes_)
self.means_ = _class_means(X, y)
if self.store_covariance:
self.covariance_ = _class_cov(X, y, self.priors_)
Xc = []
for idx, group in enumerate(self.classes_):
Xg = X[y == group, :]
Xc.append(Xg - self.means_[idx])
self.xbar_ = np.dot(self.priors_, self.means_)
Xc = np.concatenate(Xc, axis=0)
# 1) within (univariate) scaling by within-class std-dev
std = Xc.std(axis=0)
# avoid division by zero in normalization
std[std == 0] = 1.
fac = 1. / (n_samples - n_classes)
# 2) Within variance scaling
X = np.sqrt(fac) * (Xc / std)
# SVD of centered (within)scaled data
U, S, Vt = linalg.svd(X, full_matrices=False)
rank = np.sum(S > self.tol)
# Scaling of within covariance is: V' 1/S
scalings = (Vt[:rank] / std).T / S[:rank]
# 3) Between variance scaling
# Scale weighted centers
X = np.dot(((np.sqrt((n_samples * self.priors_) * fac)) *
(self.means_ - self.xbar_).T).T, scalings)
# Centers are living in a space with n_classes-1 dim (maximum)
# Use SVD to find projection in the space spanned by the
# (n_classes) centers
_, S, Vt = linalg.svd(X, full_matrices=0)
if self._max_components == 0:
self.explained_variance_ratio_ = np.empty((0,), dtype=S.dtype)
else:
self.explained_variance_ratio_ = (S**2 / np.sum(
S**2))[:self._max_components]
rank = np.sum(S > self.tol * S[0])
self.scalings_ = np.dot(scalings, Vt.T[:, :rank])
coef = np.dot(self.means_ - self.xbar_, self.scalings_)
self.intercept_ = (-0.5 * np.sum(coef ** 2, axis=1) +
np.log(self.priors_))
self.coef_ = np.dot(coef, self.scalings_.T)
self.intercept_ -= np.dot(self.xbar_, self.coef_.T)
def fit(self, X, y):
"""Fit LinearDiscriminantAnalysis model according to the given
training data and parameters.
.. versionchanged:: 0.19
*store_covariance* has been moved to main constructor.
.. versionchanged:: 0.19
*tol* has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training data.
y : array-like of shape (n_samples,)
Target values.
"""
X, y = self._validate_data(X, y, ensure_min_samples=2, estimator=self,
dtype=[np.float64, np.float32])
self.classes_ = unique_labels(y)
n_samples, _ = X.shape
n_classes = len(self.classes_)
if n_samples == n_classes:
raise ValueError("The number of samples must be more "
"than the number of classes.")
if self.priors is None: # estimate priors from sample
_, y_t = np.unique(y, return_inverse=True) # non-negative ints
self.priors_ = np.bincount(y_t) / float(len(y))
else:
self.priors_ = np.asarray(self.priors)
if (self.priors_ < 0).any():
raise ValueError("priors must be non-negative")
if not np.isclose(self.priors_.sum(), 1.0):
warnings.warn("The priors do not sum to 1. Renormalizing",
UserWarning)
self.priors_ = self.priors_ / self.priors_.sum()
# Maximum number of components no matter what n_components is
# specified:
max_components = min(len(self.classes_) - 1, X.shape[1])
if self.n_components is None:
self._max_components = max_components
else:
if self.n_components > max_components:
raise ValueError(
"n_components cannot be larger than min(n_features, "
"n_classes - 1)."
)
self._max_components = self.n_components
if self.solver == 'svd':
if self.shrinkage is not None:
raise NotImplementedError('shrinkage not supported')
if self.covariance_estimator is not None:
raise ValueError(
'covariance estimator '
'is not supported '
'with svd solver. Try another solver')
self._solve_svd(X, y)
elif self.solver == 'lsqr':
self._solve_lsqr(X, y, shrinkage=self.shrinkage,
covariance_estimator=self.covariance_estimator)
elif self.solver == 'eigen':
self._solve_eigen(X, y,
shrinkage=self.shrinkage,
covariance_estimator=self.covariance_estimator)
else:
raise ValueError("unknown solver {} (valid solvers are 'svd', "
"'lsqr', and 'eigen').".format(self.solver))
if self.classes_.size == 2: # treat binary case as a special case
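# Comment added for clarity: with two classes a single discriminant
# suffices, so the per-class weights collapse to their difference,
# giving one hyperplane w = w_1 - w_0 with intercept b = b_1 - b_0.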
self.coef_ = np.array(self.coef_[1, :] - self.coef_[0, :], ndmin=2,
dtype=X.dtype)
self.intercept_ = np.array(self.intercept_[1] - self.intercept_[0],
ndmin=1, dtype=X.dtype)
return self
def transform(self, X):
"""Project data to maximize class separation.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
X_new : ndarray of shape (n_samples, n_components)
Transformed data.
"""
if self.solver == 'lsqr':
raise NotImplementedError("transform not implemented for 'lsqr' "
"solver (use 'svd' or 'eigen').")
check_is_fitted(self)
X = self._validate_data(X, reset=False)
if self.solver == 'svd':
X_new = np.dot(X - self.xbar_, self.scalings_)
elif self.solver == 'eigen':
X_new = np.dot(X, self.scalings_)
return X_new[:, :self._max_components]
def predict_proba(self, X):
"""Estimate probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated probabilities.
"""
check_is_fitted(self)
decision = self.decision_function(X)
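# Comment added for clarity: in the binary case the decision value is the
# log-odds of the positive class, so the sigmoid (expit) maps it to
# P(y=1|x); with more classes the scores are normalized with a softmax.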
if self.classes_.size == 2:
proba = expit(decision)
return np.vstack([1-proba, proba]).T
else:
return softmax(decision)
def predict_log_proba(self, X):
"""Estimate log probability.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Input data.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Estimated log probabilities.
"""
prediction = self.predict_proba(X)
prediction[prediction == 0.0] += np.finfo(prediction.dtype).tiny
return np.log(prediction)
def decision_function(self, X):
"""Apply decision function to an array of samples.
The decision function is equal (up to a constant factor) to the
log-posterior of the model, i.e. `log p(y = k | x)`. In a binary
classification setting this instead corresponds to the difference
`log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples (test vectors).
Returns
-------
C : ndarray of shape (n_samples,) or (n_samples, n_classes)
Decision function values related to each class, per sample.
In the two-class case, the shape is (n_samples,), giving the
log likelihood ratio of the positive class.
"""
# Only override for the doc
return super().decision_function(X)
class QuadraticDiscriminantAnalysis(ClassifierMixin, BaseEstimator):
"""Quadratic Discriminant Analysis
A classifier with a quadratic decision boundary, generated
by fitting class conditional densities to the data
and using Bayes' rule.
The model fits a Gaussian density to each class.
.. versionadded:: 0.17
*QuadraticDiscriminantAnalysis*
Read more in the :ref:`User Guide <lda_qda>`.
Parameters
----------
priors : ndarray of shape (n_classes,), default=None
Class priors. By default, the class proportions are inferred from the
training data.
reg_param : float, default=0.0
Regularizes the per-class covariance estimates by transforming S2 as
``S2 = (1 - reg_param) * S2 + reg_param * np.eye(n_features)``,
where S2 corresponds to the `scaling_` attribute of a given class.
store_covariance : bool, default=False
If True, the class covariance matrices are explicitly computed and
stored in the `self.covariance_` attribute.
.. versionadded:: 0.17
tol : float, default=1.0e-4
Absolute threshold for a singular value to be considered significant,
used to estimate the rank of `Xk` where `Xk` is the centered matrix
of samples in class k. This parameter does not affect the
predictions. It only controls a warning that is raised when features
are considered to be collinear.
.. versionadded:: 0.17
Attributes
----------
covariance_ : list of len n_classes of ndarray \
of shape (n_features, n_features)
For each class, gives the covariance matrix estimated using the
samples of that class. The estimations are unbiased. Only present if
`store_covariance` is True.
means_ : array-like of shape (n_classes, n_features)
Class-wise means.
priors_ : array-like of shape (n_classes,)
Class priors (sum to 1).
rotations_ : list of len n_classes of ndarray of shape (n_features, n_k)
For each class k an array of shape (n_features, n_k), where
``n_k = min(n_features, number of elements in class k)``
It is the rotation of the Gaussian distribution, i.e. its
principal axis. It corresponds to `V`, the matrix of eigenvectors
coming from the SVD of `Xk = U S Vt` where `Xk` is the centered
matrix of samples from class k.
scalings_ : list of len n_classes of ndarray of shape (n_k,)
For each class, contains the scaling of
the Gaussian distributions along its principal axes, i.e. the
variance in the rotated coordinate system. It corresponds to `S^2 /
(n_samples - 1)`, where `S` is the diagonal matrix of singular values
from the SVD of `Xk`, where `Xk` is the centered matrix of samples
from class k.
classes_ : ndarray of shape (n_classes,)
Unique class labels.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
Examples
--------
>>> from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = QuadraticDiscriminantAnalysis()
>>> clf.fit(X, y)
QuadraticDiscriminantAnalysis()
>>> print(clf.predict([[-0.8, -1]]))
[1]
See Also
--------
LinearDiscriminantAnalysis : Linear Discriminant Analysis.
"""
def __init__(self, *, priors=None, reg_param=0., store_covariance=False,
tol=1.0e-4):
self.priors = np.asarray(priors) if priors is not None else None
self.reg_param = reg_param
self.store_covariance = store_covariance
self.tol = tol
def fit(self, X, y):
"""Fit the model according to the given training data and parameters.
.. versionchanged:: 0.19
``store_covariances`` has been moved to main constructor as
``store_covariance``
.. versionchanged:: 0.19
``tol`` has been moved to main constructor.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like of shape (n_samples,)
Target values (integers)
"""
X, y = self._validate_data(X, y)
check_classification_targets(y)
self.classes_, y = np.unique(y, return_inverse=True)
n_samples, n_features = X.shape
n_classes = len(self.classes_)
if n_classes < 2:
raise ValueError('The number of classes has to be greater than'
' one; got %d class' % (n_classes))
if self.priors is None:
self.priors_ = np.bincount(y) / float(n_samples)
else:
self.priors_ = self.priors
cov = None
store_covariance = self.store_covariance
if store_covariance:
cov = []
means = []
scalings = []
rotations = []
for ind in range(n_classes):
Xg = X[y == ind, :]
meang = Xg.mean(0)
means.append(meang)
if len(Xg) == 1:
raise ValueError('y has only 1 sample in class %s, covariance '
'is ill defined.' % str(self.classes_[ind]))
Xgc = Xg - meang
# Xgc = U * S * V.T
_, S, Vt = np.linalg.svd(Xgc, full_matrices=False)
rank = np.sum(S > self.tol)
if rank < n_features:
warnings.warn("Variables are collinear")
S2 = (S ** 2) / (len(Xg) - 1)
S2 = ((1 - self.reg_param) * S2) + self.reg_param
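# Comment added for clarity: shrinking the eigenvalues S2 toward 1 here
# corresponds to the regularization documented in the class docstring,
# (1 - reg_param) * Sigma_k + reg_param * identity, applied in the rotated
# (eigenvector) basis.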
if self.store_covariance or store_covariance:
# cov = V * (S^2 / (n-1)) * V.T
cov.append(np.dot(S2 * Vt.T, Vt))
scalings.append(S2)
rotations.append(Vt.T)
if self.store_covariance or store_covariance:
self.covariance_ = cov
self.means_ = np.asarray(means)
self.scalings_ = scalings
self.rotations_ = rotations
return self
def _decision_function(self, X):
# return log posterior, see eq (4.12) p. 110 of the ESL.
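# Comment added for clarity: for each class k this evaluates, up to a
# shared constant,
#   -0.5 * [(x - mu_k)^T Sigma_k^{-1} (x - mu_k) + log|Sigma_k|] + log(prior_k)
# using the stored rotations/scalings instead of an explicit inverse.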
check_is_fitted(self)
X = self._validate_data(X, reset=False)
norm2 = []
for i in range(len(self.classes_)):
R = self.rotations_[i]
S = self.scalings_[i]
Xm = X - self.means_[i]
X2 = np.dot(Xm, R * (S ** (-0.5)))
norm2.append(np.sum(X2 ** 2, axis=1))
norm2 = np.array(norm2).T # shape = [len(X), n_classes]
u = np.asarray([np.sum(np.log(s)) for s in self.scalings_])
return (-0.5 * (norm2 + u) + np.log(self.priors_))
def decision_function(self, X):
"""Apply decision function to an array of samples.
The decision function is equal (up to a constant factor) to the
log-posterior of the model, i.e. `log p(y = k | x)`. In a binary
classification setting this instead corresponds to the difference
`log p(y = 1 | x) - log p(y = 0 | x)`. See :ref:`lda_qda_math`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples (test vectors).
Returns
-------
C : ndarray of shape (n_samples,) or (n_samples, n_classes)
Decision function values related to each class, per sample.
In the two-class case, the shape is (n_samples,), giving the
log likelihood ratio of the positive class.
"""
dec_func = self._decision_function(X)
# handle special case of two classes
if len(self.classes_) == 2:
return dec_func[:, 1] - dec_func[:, 0]
return dec_func
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
"""
d = self._decision_function(X)
y_pred = self.classes_.take(d.argmax(1))
return y_pred
def predict_proba(self, X):
"""Return posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior probabilities of classification per class.
"""
values = self._decision_function(X)
# compute the likelihood of the underlying gaussian models
# up to a multiplicative constant.
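# Comment added for clarity: subtracting the per-row maximum before
# exponentiating avoids overflow; the shift cancels in the normalization.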
likelihood = np.exp(values - values.max(axis=1)[:, np.newaxis])
# compute posterior probabilities
return likelihood / likelihood.sum(axis=1)[:, np.newaxis]
def predict_log_proba(self, X):
"""Return log of posterior probabilities of classification.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Array of samples/test vectors.
Returns
-------
C : ndarray of shape (n_samples, n_classes)
Posterior log-probabilities of classification per class.
"""
# XXX : can do better to avoid precision overflows
probas_ = self.predict_proba(X)
return np.log(probas_)
|
{
"content_hash": "06f8719d6d9e74d344f40e67a3e3748a",
"timestamp": "",
"source": "github",
"line_count": 928,
"max_line_length": 79,
"avg_line_length": 37.44396551724138,
"alnum_prop": 0.5856164383561644,
"repo_name": "kevin-intel/scikit-learn",
"id": "3cb6cc1712f29af71e9d4daf5eb71959620be5da",
"size": "34748",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sklearn/discriminant_analysis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "394787"
},
{
"name": "C++",
"bytes": "140225"
},
{
"name": "Makefile",
"bytes": "1579"
},
{
"name": "PowerShell",
"bytes": "17042"
},
{
"name": "Python",
"bytes": "6394128"
},
{
"name": "Shell",
"bytes": "9250"
}
],
"symlink_target": ""
}
|
from penguincomputing import *
|
{
"content_hash": "7ea7d977c1dec6e604f94116c1f8eb71",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 30,
"avg_line_length": 31,
"alnum_prop": 0.8387096774193549,
"repo_name": "motivator/clusto",
"id": "6eeee7bf64adce27b0dfd9e56873cf5dbf890348",
"size": "31",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "contrib/diggext/drivers/devices/servers/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1496"
},
{
"name": "HTML",
"bytes": "5677"
},
{
"name": "Python",
"bytes": "346861"
}
],
"symlink_target": ""
}
|
import sphinx_rtd_theme
# -- Project information -----------------------------------------------------
project = 'galgebra'
copyright = '2014-2019, Alan Bromborsky and GAlgebra team'
author = 'Alan Bromborsky'
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'nbsphinx',
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.coverage',
'sphinx.ext.napoleon',
'sphinx.ext.mathjax',
'sphinx.ext.viewcode',
'IPython.sphinxext.ipython_directive',
'IPython.sphinxext.ipython_console_highlighting',
'sphinx_markdown_tables',
'm2r',
'releases'
]
# -- nbsphinx configuration ---------------------------------------------------
import galgebra
import nbsphinx
# nbsphinx_execute = 'always'
nbsphinx_execute = 'never'
nbsphinx_allow_errors = True
nbsphinx_kernel_name = 'python'
nbsphinx_timeout = 60
# -- extensions configuration ---------------------------------------------------
napoleon_include_init_with_doc = False
autoclass_content = "both" # include both class docstring and __init__
autodoc_default_flags = [
# Make sure that any autodoc declarations show the right members
"members",
"inherited-members",
# "undoc-members",
# "special-members",
# "private-members",
# "show-inheritance",
]
#autodoc_default_flags='members'
# you have to list all files with automodule here due to bug in sphinx and nbsphinx
# https://github.com/spatialaudio/nbsphinx/issues/14
autosummary_generate = ['api']
numpydoc_show_class_members = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# from recommonmark.parser import CommonMarkParser
# source_parsers = {
# '.md': CommonMarkParser,
# }
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ['.rst', '.md']
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', '**.ipynb_checkpoints', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If your project is hosted on Github, set the releases_github_path setting instead,
# to e.g. account/project. Releases will then use an appropriate Github URL for both
# releases and issues.
releases_github_path = 'pygae/galgebra'
# You may optionally set releases_debug = True to see debug output while building your docs.
releases_debug = True
# If your changelog includes “simple” pre-1.0 releases derived from a single branch
# (i.e. without stable release lines & semantic versioning) you may want to set
# releases_unstable_prehistory = True.
releases_unstable_prehistory = True
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_context = {
'css_files': [
'_static/theme_overrides.css', # override wide tables in RTD theme
],
}
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'galgebradoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'galgebra.tex', 'galgebra Documentation',
'Alan Bromborsky', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'galgebra', 'galgebra Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'galgebra', 'galgebra Documentation',
author, 'galgebra', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
|
{
"content_hash": "81604f25c5fd1c0353d6c39f7d8d1853",
"timestamp": "",
"source": "github",
"line_count": 223,
"max_line_length": 92,
"avg_line_length": 30.408071748878925,
"alnum_prop": 0.649166789559062,
"repo_name": "arsenovic/galgebra",
"id": "b122a89542b1397b4c7ccc71e8caf6eeea6a1287",
"size": "7432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "304229"
},
{
"name": "Shell",
"bytes": "634"
}
],
"symlink_target": ""
}
|
class Var(object):
def __init__(self, name):
super(Var, self).__init__()
self.name = name
|
{
"content_hash": "09d378db9f7f737076503b070cf20e1e",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 35,
"avg_line_length": 27.5,
"alnum_prop": 0.5181818181818182,
"repo_name": "JohnVinyard/featureflow",
"id": "a04ac9a866699c9db4065ceee3e2218ff1274326",
"size": "111",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "featureflow/var.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "164752"
},
{
"name": "Shell",
"bytes": "64"
}
],
"symlink_target": ""
}
|
from django import forms
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from logins.models import UserProfile
class UserForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username and
password.
"""
error_messages = {
'duplicate_username': _("A user with that username already exists."),
'password_mismatch': _("The two password fields didn't match."),
}
username = forms.RegexField(label=_("Username"), max_length=30,
regex=r'^[\w.@+-]+$',
help_text=_("Required. 30 characters or fewer. Letters, digits and "
"@/./+/-/_ only."),
error_messages={
'invalid': _("This value may contain only letters, numbers and "
"@/./+/-/_ characters.")})
password1 = forms.CharField(label=_("Password"),
widget=forms.PasswordInput)
password2 = forms.CharField(label=_("Password confirmation"),
widget=forms.PasswordInput,
help_text=_("Enter the same password as above, for verification."))
class Meta:
model = User
fields = ("username",)
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(
self.error_messages['duplicate_username'],
code='duplicate_username',
)
def clean_password2(self):
password1 = self.cleaned_data.get("password1")
password2 = self.cleaned_data.get("password2")
if password1 and password2 and password1 != password2:
raise forms.ValidationError(
self.error_messages['password_mismatch'],
code='password_mismatch',
)
return password2
def save(self, commit=True):
user = super(UserForm, self).save(commit=False)
user.set_password(self.cleaned_data["password1"])
if commit:
user.save()
return user
class UserProfileForm(forms.ModelForm):
class Meta:
model = UserProfile
fields = ['real_name', 'school_id', 'phone_num']
exclude = ['user']
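# A minimal usage sketch (added for illustration, not part of the original
# module); the field values are assumed examples:
#
#   form = UserForm(data={'username': 'alice',
#                         'password1': 's3cret-pass',
#                         'password2': 's3cret-pass'})
#   if form.is_valid():
#       user = form.save()   # hashes the password via set_password()
#   else:
#       print(form.errors)   # e.g. duplicate_username or password_mismatch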
|
{
"content_hash": "78b6ca4e0a8d371e1d8f39fa4607a96e",
"timestamp": "",
"source": "github",
"line_count": 69,
"max_line_length": 79,
"avg_line_length": 35.52173913043478,
"alnum_prop": 0.6054671562627499,
"repo_name": "adamwen/qdulab",
"id": "62a1dcc27363f3c4304c8411899433170430331e",
"size": "2451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "logins/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "136711"
},
{
"name": "JavaScript",
"bytes": "58458"
},
{
"name": "Python",
"bytes": "27156"
}
],
"symlink_target": ""
}
|
import pandas as pd
import pyarrow as pa
import vaex
df = pd.DataFrame({'col1': range(5)})
table = pa.Table.from_pandas(df)
def test_arrow_write_table(tmpdir):
path = str(tmpdir.join('test.arrow'))
with pa.OSFile(path, 'wb') as sink:
with pa.RecordBatchFileWriter(sink, table.schema) as writer:
writer.write_table(table)
df = vaex.open(path)
def test_arrow_write_stream(tmpdir):
path = str(tmpdir.join('test.arrow'))
with pa.OSFile(path, 'wb') as sink:
with pa.RecordBatchStreamWriter(sink, table.schema) as writer:
writer.write_table(table)
df = vaex.open(path)
def test_chunks(df_trimmed, tmpdir):
path = str(tmpdir.join('test.arrow'))
df = df_trimmed[['x', 'y', 'name']]
df.export_arrow_stream(path, chunk_size=2)
df_read = vaex.open(path, as_numpy=False)
assert isinstance(df_read.columns['x'], pa.ChunkedArray)
assert df_read.x.tolist() == df.x.tolist()
|
{
"content_hash": "6f861400f7408124ca57bfa9ca86ac0b",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 70,
"avg_line_length": 28.96969696969697,
"alnum_prop": 0.6537656903765691,
"repo_name": "maartenbreddels/vaex",
"id": "79279fba895a3b4053ba7b9eb12d3b8dbf234d09",
"size": "956",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/arrow/io_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1888"
},
{
"name": "C++",
"bytes": "81166"
},
{
"name": "CSS",
"bytes": "6604"
},
{
"name": "GLSL",
"bytes": "6204"
},
{
"name": "HTML",
"bytes": "177613"
},
{
"name": "JavaScript",
"bytes": "1489136"
},
{
"name": "Makefile",
"bytes": "432"
},
{
"name": "PHP",
"bytes": "33807"
},
{
"name": "Python",
"bytes": "1893232"
},
{
"name": "Shell",
"bytes": "4639"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class HovertemplatesrcValidator(_plotly_utils.basevalidators.SrcValidator):
def __init__(
self, plotly_name="hovertemplatesrc", parent_name="histogram", **kwargs
):
super(HovertemplatesrcValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "none"),
**kwargs,
)
|
{
"content_hash": "dd1459129ff3e119477d76a7c3fbb048",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 79,
"avg_line_length": 33.61538461538461,
"alnum_prop": 0.6247139588100686,
"repo_name": "plotly/plotly.py",
"id": "1d9b17b812e0b14bb78d34859cd581315dbf96f7",
"size": "437",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/histogram/_hovertemplatesrc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
}
|
"""
Django settings for {{ project_name }} project.
For more information on this file, see
https://docs.djangoproject.com/en/dev/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/dev/ref/settings/
Quick-start development settings - unsuitable for production, see
https://docs.djangoproject.com/en/dev/howto/deployment/checklist/
"""
import os
from decouple import config
from dj_database_url import parse as db_url
from django.conf import settings
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config('SECRET_KEY', default='rk@*3e&di0wztcmdm-6z$v4h@zd+hv1&q&+4-8d05o+h_yp!k3')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = config('DEBUG', default=False, cast=bool)
TEMPLATE_DEBUG = DEBUG
# SECURITY WARNING: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-ALLOWED_HOSTS
ALLOWED_HOSTS = ['.localhost', '127.0.0.1']
# Project applications
PROJECT_APPS = (
'{{ project_name }}.core',
)
# External applications
INSTALLED_APPS = (
# REQUIREMENT: Grappelli
# - Should be before django.contrib.admin
# - TEMPLATE_CONTEXT_PROCESSORS: "django.core.context_processors.request"
'grappelli',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# External Libraries
'compressor',
'debug_toolbar.apps.DebugToolbarConfig',
'django_extensions',
'reversion',
'taggit',
'widget_tweaks',
'django_nose',
) + PROJECT_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = '{{ project_name }}.urls'
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Database
# https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': config(
'DATABASE_URL',
default='sqlite:///' + os.path.join(BASE_DIR, 'db.sqlite3'),
cast=db_url),
}
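# For reference (illustrative value, not part of the template): dj-database-url
# parses URLs such as DATABASE_URL=postgres://user:password@localhost:5432/mydb
# into the dictionary form Django expects.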
# Internationalization
# https://docs.djangoproject.com/en/dev/topics/i18n/
LANGUAGE_CODE = 'pt-br'
USE_I18N = True
USE_L10N = True
USE_TZ = True
TIME_ZONE = 'UTC'
LANGUAGES = (
('pt-br', u'Português'),
('en', u'Inglês'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/dev/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATIC_URL = '/static/'
# Media files (Files uploaded by users)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'django.contrib.staticfiles.finders.FileSystemFinder',
'compressor.finders.CompressorFinder',
)
TEMPLATE_CONTEXT_PROCESSORS = settings.TEMPLATE_CONTEXT_PROCESSORS + (
"django.core.context_processors.request",
)
# E-mail:
SERVER_EMAIL = config('SERVER_EMAIL', default=settings.SERVER_EMAIL)
DEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL', default=settings.DEFAULT_FROM_EMAIL)
EMAIL_HOST = config('EMAIL_HOST', default=settings.EMAIL_HOST)
EMAIL_HOST_USER = config('EMAIL_HOST_USER', default=settings.EMAIL_HOST_USER)
EMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=settings.EMAIL_HOST_PASSWORD)
EMAIL_PORT = config('EMAIL_PORT', default=settings.EMAIL_PORT)
EMAIL_USE_TLS = config('EMAIL_USE_TLS', default=settings.EMAIL_USE_TLS)
EMAIL_USE_SSL = config('EMAIL_USE_SSL', default=settings.EMAIL_USE_SSL)
"""
Configurations implemented by External Libraries
"""
# Django Extensions
SHELL_PLUS = "ipython"
# Django Nose
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = ['--nocapture', '--nologcapture']
if os.environ.get('COVERAGE'):
NOSE_ARGS += [
'--with-xcoverage',
'--cover-package={{ project_name }}',
'--xcoverage-file=%s' % (
os.path.join(BASE_DIR, 'coverage/coverage.xml')),
]
|
{
"content_hash": "bf17bad589c5811cbe9682a6f9ead6b3",
"timestamp": "",
"source": "github",
"line_count": 145,
"max_line_length": 97,
"avg_line_length": 30.324137931034482,
"alnum_prop": 0.7143506936547646,
"repo_name": "dullaran/django-project-template",
"id": "e705bcee0e83447fcb7776ceac8071f1e547fef7",
"size": "4415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_name/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "9289"
},
{
"name": "Python",
"bytes": "6198"
},
{
"name": "Shell",
"bytes": "327"
}
],
"symlink_target": ""
}
|
"""Test autoreject."""
# Author: Mainak Jas <mainak.jas@telecom-paristech.fr>
# Denis A. Engemann <denis.engemann@gmail.com>
# Stefan Appelhoff <stefan.appelhoff@mailbox.org>
#
# License: BSD-3-Clause
import os.path as op
import pickle
import platform
import numpy as np
from numpy.testing import assert_array_equal
import pytest
import mne
from mne.datasets import testing
from mne import io
from mne.utils import _TempDir
from autoreject import (_GlobalAutoReject, _AutoReject, AutoReject,
compute_thresholds, validation_curve,
get_rejection_threshold, read_auto_reject,
read_reject_log)
from autoreject.utils import _get_picks_by_type
from autoreject.autoreject import _get_interp_chs
data_path = testing.data_path(download=False)
raw_fname = data_path / 'MEG' / 'sample' / 'sample_audvis_trunc_raw.fif'
fname_nirx = op.join(
data_path, 'NIRx', 'nirscout', 'nirx_15_2_recording_w_short')
ignore_decim = pytest.mark.filterwarnings(
'ignore:The measurement information indicates a low-pass.*:RuntimeWarning')
@ignore_decim
@testing.requires_testing_data
def test_global_autoreject():
"""Test global autoreject."""
raw = io.read_raw_fif(raw_fname, preload=False)
raw.del_proj()
event_id = None
tmin, tmax = -0.2, 0.5
events = mne.find_events(raw)
picks = mne.pick_types(raw.info, meg=True, eeg=True, stim=False,
eog=True, exclude=[])
# raise error if preload is false
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0),
reject=None, preload=False)
# Test get_rejection_thresholds.
reject1 = get_rejection_threshold(epochs, decim=1, random_state=42)
reject2 = get_rejection_threshold(epochs, decim=1, random_state=42)
reject3 = get_rejection_threshold(epochs, decim=2, random_state=42)
tols = dict(eeg=5e-6, eog=5e-6, grad=10e-12, mag=5e-15)
if platform.system().lower().startswith("win"): # pragma: no cover
# XXX: When testing on Windows, the precision seemed to be lower. Why?
tols = dict(eeg=9e-5, eog=9e-5, grad=10e-12, mag=5e-15)
assert reject1 and isinstance(reject1, dict)
for key, value in list(reject1.items()):
assert reject1[key] == reject2[key]
assert abs(reject1[key] - reject3[key]) < tols[key]
reject = get_rejection_threshold(epochs, decim=4, ch_types='eeg')
assert 'eog' not in reject
assert 'eeg' in reject
pytest.raises(ValueError, get_rejection_threshold, epochs,
decim=4, ch_types=5)
@ignore_decim
@testing.requires_testing_data
def test_autoreject():
"""Test basic _AutoReject functionality."""
raw = io.read_raw_fif(raw_fname, preload=False)
raw.del_proj()
raw.info['bads'] = []
event_id = None
tmin, tmax = -0.2, 0.5
events = mne.find_events(raw)
##########################################################################
# picking epochs
include = [u'EEG %03d' % i for i in range(1, 45, 3)]
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False,
eog=True, include=include, exclude=[])
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), decim=10,
reject=None, preload=False)[:10]
ar = _AutoReject()
pytest.raises(ValueError, ar.fit, epochs)
epochs.load_data()
ar.fit(epochs)
assert len(ar.picks_) == len(picks) - 1
# epochs with no picks.
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
baseline=(None, 0), decim=10,
reject=None, preload=True)[:20]
# let's drop some channels to speed up
pre_picks = mne.pick_types(epochs.info, meg=True, eeg=True)
pre_picks = np.r_[
mne.pick_types(epochs.info, meg='mag', eeg=False)[::15],
mne.pick_types(epochs.info, meg='grad', eeg=False)[::60],
mne.pick_types(epochs.info, meg=False, eeg=True)[::16],
mne.pick_types(epochs.info, meg=False, eeg=False, eog=True)]
pick_ch_names = [epochs.ch_names[pp] for pp in pre_picks]
bad_ch_names = [epochs.ch_names[ix] for ix in range(len(epochs.ch_names))
if ix not in pre_picks]
epochs_with_bads = epochs.copy()
epochs_with_bads.info['bads'] = bad_ch_names
epochs.pick_channels(pick_ch_names)
epochs_fit = epochs[:12] # make sure to use different size of epochs
epochs_new = epochs[12:]
epochs_with_bads_fit = epochs_with_bads[:12]
X = epochs_fit.get_data()
n_epochs, n_channels, n_times = X.shape
X = X.reshape(n_epochs, -1)
ar = _GlobalAutoReject()
pytest.raises(ValueError, ar.fit, X)
ar = _GlobalAutoReject(n_channels=n_channels)
pytest.raises(ValueError, ar.fit, X)
ar = _GlobalAutoReject(n_times=n_times)
pytest.raises(ValueError, ar.fit, X)
ar_global = _GlobalAutoReject(
n_channels=n_channels, n_times=n_times, thresh=40e-6)
ar_global.fit(X)
param_range = np.linspace(40e-6, 200e-6, 10)
train_scores, test_scores = \
validation_curve(epochs_fit, param_range=param_range)
assert len(train_scores) == len(test_scores)
train_scores, test_scores, param_range = \
validation_curve(epochs_fit, return_param_range=True)
assert len(train_scores) == len(test_scores) == len(param_range)
pytest.raises(ValueError, validation_curve, X, param_range=param_range)
##########################################################################
# picking AutoReject
picks = mne.pick_types(
epochs.info, meg='mag', eeg=True, stim=False, eog=False,
include=[], exclude=[])
non_picks = mne.pick_types(
epochs.info, meg='grad', eeg=False, stim=False, eog=False,
include=[], exclude=[])
ch_types = ['mag', 'eeg']
ar = _AutoReject(picks=picks) # XXX : why do we need this??
ar = AutoReject(cv=3, picks=picks, random_state=42,
n_interpolate=[1, 2], consensus=[0.5, 1])
pytest.raises(AttributeError, ar.fit, X)
pytest.raises(ValueError, ar.transform, X)
pytest.raises(ValueError, ar.transform, epochs)
epochs_nochs = epochs_fit.copy()
# just one channel loc is nan or all channel locs are 0.
# Should raise error in both cases
epochs_nochs.info['chs'][1]['loc'][:] = np.nan
pytest.raises(RuntimeError, ar.fit, epochs_nochs)
for ch in epochs_nochs.info['chs']:
ch['loc'] = np.zeros_like(ch['loc'])
pytest.raises(RuntimeError, ar.fit, epochs_nochs)
ar2 = AutoReject(cv=3, picks=picks, random_state=42,
n_interpolate=[1, 2], consensus=[0.5, 1],
verbose='blah')
with pytest.raises(ValueError, match='boolean'):
ar2.fit(epochs_fit)
ar.fit(epochs_fit)
reject_log = ar.get_reject_log(epochs_fit)
for ch_type in ch_types:
# test that kappa & rho are selected
assert ar.n_interpolate_[ch_type] in ar.n_interpolate
assert ar.consensus_[ch_type] in ar.consensus
assert (ar.n_interpolate_[ch_type] ==
ar.local_reject_[ch_type].n_interpolate_[ch_type])
assert (ar.consensus_[ch_type] ==
ar.local_reject_[ch_type].consensus_[ch_type])
# test complementarity of goods and bads
assert_array_equal(len(reject_log.bad_epochs), len(epochs_fit))
# test that transform does not change state of ar
epochs_clean = ar.transform(epochs_fit) # apply same data
assert repr(ar)
assert repr(ar.local_reject_)
reject_log2 = ar.get_reject_log(epochs_fit)
assert_array_equal(reject_log.labels, reject_log2.labels)
assert_array_equal(reject_log.bad_epochs, reject_log2.bad_epochs)
assert_array_equal(reject_log.ch_names, reject_log2.ch_names)
epochs_new_clean = ar.transform(epochs_new) # apply to new data
reject_log_new = ar.get_reject_log(epochs_new)
assert_array_equal(len(reject_log_new.bad_epochs), len(epochs_new))
assert len(reject_log_new.bad_epochs) != len(reject_log.bad_epochs)
picks_by_type = _get_picks_by_type(epochs.info, ar.picks)
# test correct entries in fix log
assert np.isnan(reject_log_new.labels[:, non_picks]).sum() > 0
assert np.isnan(reject_log_new.labels[:, picks]).sum() == 0
assert (reject_log_new.labels.shape ==
(len(epochs_new), len(epochs_new.ch_names)))
# test correct interpolations by type
for ch_type, this_picks in picks_by_type:
interp_counts = np.sum(
reject_log_new.labels[:, this_picks] == 2, axis=1)
labels = reject_log_new.labels.copy()
not_this_picks = np.setdiff1d(np.arange(labels.shape[1]), this_picks)
labels[:, not_this_picks] = np.nan
interp_channels = _get_interp_chs(
labels, reject_log.ch_names, this_picks)
assert_array_equal(
interp_counts, [len(cc) for cc in interp_channels])
assert len(epochs_new_clean.get_data()) != len(epochs_new.get_data())
# test that transform can take new reject_log
reject_log1 = ar.get_reject_log(epochs)
assert reject_log1.bad_epochs.sum() > 0 # ensure > 1 bad epoch
reject_log1.bad_epochs[:] = False
epochs_nobad = ar.transform(epochs, reject_log=reject_log1)
assert len(epochs_nobad) == len(epochs)
pytest.raises(ValueError, ar.transform, epochs, reject_log='blah')
# test that transform ignores bad channels
epochs_with_bads_fit.pick_types(meg='mag', eeg=True, eog=True, exclude=[])
ar_bads = AutoReject(cv=3, random_state=42,
n_interpolate=[1, 2], consensus=[0.5, 1])
with pytest.warns(UserWarning, match='151 channels are marked as bad'):
ar_bads.fit(epochs_with_bads_fit)
with pytest.warns(UserWarning, match='151 channels are marked as bad'):
epochs_with_bads_clean = ar_bads.transform(epochs_with_bads_fit)
good_w_bads_ix = mne.pick_types(epochs_with_bads_clean.info,
meg='mag', eeg=True, eog=True,
exclude='bads')
good_wo_bads_ix = mne.pick_types(epochs_clean.info,
meg='mag', eeg=True, eog=True,
exclude='bads')
assert_array_equal(epochs_with_bads_clean.get_data()[:, good_w_bads_ix, :],
epochs_clean.get_data()[:, good_wo_bads_ix, :])
bad_ix = [epochs_with_bads_clean.ch_names.index(ch)
for ch in epochs_with_bads_clean.info['bads']]
epo_ix = ~ar_bads.get_reject_log(epochs_with_bads_fit).bad_epochs
assert_array_equal(
epochs_with_bads_clean.get_data()[:, bad_ix, :],
epochs_with_bads_fit.get_data()[epo_ix, :, :][:, bad_ix, :])
assert epochs_clean.ch_names == epochs_fit.ch_names
assert isinstance(ar.threshes_, dict)
assert len(ar.picks) == len(picks)
assert len(ar.threshes_.keys()) == len(ar.picks)
pick_eog = mne.pick_types(epochs.info, meg=False, eeg=False, eog=True)[0]
assert epochs.ch_names[pick_eog] not in ar.threshes_.keys()
pytest.raises(
IndexError, ar.transform,
epochs.copy().pick_channels(
[epochs.ch_names[pp] for pp in picks[:3]]))
epochs.load_data()
pytest.raises(ValueError, compute_thresholds, epochs, 'dfdfdf')
index, ch_names = zip(*[(ii, epochs_fit.ch_names[pp])
for ii, pp in enumerate(picks)])
threshes_a = compute_thresholds(
epochs_fit, picks=picks, method='random_search')
assert set(threshes_a.keys()) == set(ch_names)
threshes_b = compute_thresholds(
epochs_fit, picks=picks, method='bayesian_optimization')
assert set(threshes_b.keys()) == set(ch_names)
@ignore_decim
@testing.requires_testing_data
def test_io():
"""Test IO functionality."""
raw = io.read_raw_fif(raw_fname, preload=False)
raw.del_proj()
event_id = None
tmin, tmax = -0.2, 0.5
events = mne.find_events(raw)
savedir = _TempDir()
fname = op.join(savedir, 'autoreject.hdf5')
include = [u'EEG %03d' % i for i in range(1, 45, 3)]
picks = mne.pick_types(raw.info, meg=False, eeg=False, stim=False,
eog=True, include=include, exclude=[])
# raise error if preload is false
epochs = mne.Epochs(raw, events, event_id, tmin, tmax,
picks=picks, baseline=(None, 0), decim=4,
reject=None, preload=True)[:10]
ar = AutoReject(cv=2, random_state=42, n_interpolate=[1],
consensus=[0.5], verbose=False)
ar.save(fname) # save without fitting
pkl_ar = pickle.dumps(ar) # also, pickle without fitting
# check that fit after saving is the same as fit without saving/pickling
ar2 = read_auto_reject(fname)
ar3 = pickle.loads(pkl_ar)
ar.fit(epochs)
ar2.fit(epochs)
ar3.fit(epochs)
assert np.sum([ar.threshes_[k] - ar2.threshes_[k]
for k in ar.threshes_.keys()]) == 0.
assert np.sum([ar.threshes_[k] - ar3.threshes_[k]
for k in ar.threshes_.keys()]) == 0.
pytest.raises(ValueError, ar.save, fname)
ar.save(fname, overwrite=True)
pkl_ar2 = pickle.dumps(ar)
ar4 = read_auto_reject(fname)
ar5 = pickle.loads(pkl_ar2)
epochs_clean1, reject_log1 = ar.transform(epochs, return_log=True)
epochs_clean2, reject_log2 = ar4.transform(epochs, return_log=True)
epochs_clean3, reject_log3 = ar5.transform(epochs, return_log=True)
assert_array_equal(epochs_clean1.get_data(), epochs_clean2.get_data())
assert_array_equal(epochs_clean1.get_data(), epochs_clean3.get_data())
assert_array_equal(reject_log1.labels, reject_log2.labels)
assert_array_equal(reject_log1.labels, reject_log3.labels)
reject_log1.save(op.join(savedir, 'reject_log.npz'))
reject_log4 = read_reject_log(op.join(savedir, 'reject_log.npz'))
assert_array_equal(reject_log1.labels, reject_log4.labels)
assert_array_equal(reject_log1.bad_epochs, reject_log4.bad_epochs)
assert all(reject_log1.ch_names == reject_log4.ch_names)
@testing.requires_testing_data
def test_fnirs():
"""Test that autoreject runs on fNIRS data."""
raw = mne.io.read_raw_nirx(fname_nirx)
raw = mne.preprocessing.nirs.optical_density(raw)
raw = mne.preprocessing.nirs.beer_lambert_law(raw)
events, _ = mne.events_from_annotations(raw)
epochs = mne.Epochs(raw, events,
tmin=-1, tmax=1,
proj=True, baseline=(None, 0), preload=True,
detrend=None, verbose=True)
# Test autoreject
ar = AutoReject(cv=3, consensus=[0.1])
assert len(epochs) == 3
epochs_clean = ar.fit_transform(epochs)
assert len(epochs_clean) < len(epochs)
# Test threshold extraction
reject = get_rejection_threshold(epochs, cv=3)
assert "hbo" in reject.keys()
assert "hbr" in reject.keys()
assert reject["hbo"] < 0.001 # This is a very high value as sanity check
assert reject["hbr"] < 0.001
assert reject["hbr"] > 0.0
@testing.requires_testing_data
@pytest.mark.parametrize('ch_type', ['ecog', 'seeg'])
def test_ecog(ch_type):
"""Test that autoreject runs on ECoG and sEEG data."""
raw = mne.io.read_raw(raw_fname).del_proj().pick('eeg')
raw.info['bads'] = []
# setting the channel types
ch_dict = {ch: ch_type for ch in raw.ch_names}
raw.set_channel_types(ch_dict)
# make events
epochs = mne.make_fixed_length_epochs(raw).load_data()
n1 = len(epochs)
reject = get_rejection_threshold(epochs)
epochs.drop_bad(reject=reject)
n2 = len(epochs)
assert ch_type in reject.keys()
assert reject[ch_type] > 0.0
assert reject[ch_type] < 0.01
assert n2 < n1
# testing that compute_thresholds is working without location data
epochs.set_montage(None)
rejects = compute_thresholds(epochs, augment=False)
assert set(rejects.keys()) == set(raw.ch_names)
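# ---------------------------------------------------------------------------
# Illustrative usage sketch, not part of the original test suite. It only
# strings together API calls already exercised above and assumes a preloaded
# `epochs` instance; it is kept as comments and is never executed by pytest.
#
#   ar = AutoReject(n_interpolate=[1, 2], consensus=[0.5, 1], random_state=42)
#   ar.fit(epochs)
#   epochs_clean = ar.transform(epochs)
#   reject_log = ar.get_reject_log(epochs)
#   print(reject_log.bad_epochs.sum(), "epochs marked bad")
#
#   # Global peak-to-peak rejection threshold, as in test_fnirs/test_ecog:
#   reject = get_rejection_threshold(epochs)
#   epochs.drop_bad(reject=reject)
# ---------------------------------------------------------------------------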
|
{
"content_hash": "2e02836a93fb92ee4cb51d5ee080f3bb",
"timestamp": "",
"source": "github",
"line_count": 399,
"max_line_length": 79,
"avg_line_length": 40.38095238095238,
"alnum_prop": 0.6263033763654419,
"repo_name": "autoreject/autoreject",
"id": "adb095e15a472d281f00197e90232ac8550efaaf",
"size": "16112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "autoreject/tests/test_autoreject.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "847"
},
{
"name": "Python",
"bytes": "122619"
}
],
"symlink_target": ""
}
|
#!/usr/bin/env python
def find_regex_list(regex,filelist):
import re,os
dig3tlist = []
    try:
        for f in filelist:
            if re.findall(regex, f):
                dig3tlist.append(f)
        dig3tlist = sorted(dig3tlist)
    except TypeError:
        print "Error"
    return dig3tlist
def found3digit_rename(filename):
import os
#print filename
fdir = os.path.dirname(filename)
destdir = fdir #.strip("['")
#print destdir
fname = os.path.basename(filename)
style = fname.split('_')[0]
ext = fname.split('.')[-1]
oldname = filename
    incr = 1
    newstyle = str(style + "_" + str(incr) + "." + ext)
    newname = os.path.join(destdir, newstyle)
    # bump the counter until we reach a filename that is not taken yet
    while os.path.isfile(newname):
        incr += 1
        newstyle = str(style + "_" + str(incr) + "." + ext)
        newname = os.path.join(destdir, newstyle)
        print newname
    os.rename(oldname, newname)
return
import re,os,glob
dir_pushstill = '/mnt/Post_Ready/aPhotoPush'
dir_pushfashion = '/mnt/Post_Ready/aFashionPush'
pushstillfiles = glob.glob(os.path.join(dir_pushstill, '*/*/*.jpg'))
pushfashionfiles = glob.glob(os.path.join(dir_pushfashion, '*/*/*.jpg'))
regex_3 = re.compile(r'.+?/[2-9][0-9]{8}_[0-9]{3}.jpg')
foundliststill = find_regex_list(regex_3,pushstillfiles)
for f in foundliststill:
print f
found3digit_rename(f)
foundlistfashion = find_regex_list(regex_3,pushfashionfiles)
for f in foundlistfashion:
print f
found3digit_rename(f)
|
{
"content_hash": "49f1cafcafeb25bd66ff6849ba17ce87",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 72,
"avg_line_length": 26.328125,
"alnum_prop": 0.5940652818991098,
"repo_name": "relic7/prodimages",
"id": "eb0792eb6770a0a737ad619a9f04f7c7ce99cf9b",
"size": "1685",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/python3digitextphotorename.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16783"
},
{
"name": "HTML",
"bytes": "88323"
},
{
"name": "JavaScript",
"bytes": "158855"
},
{
"name": "PHP",
"bytes": "70412"
},
{
"name": "PLSQL",
"bytes": "72767"
},
{
"name": "Perl",
"bytes": "7143"
},
{
"name": "Python",
"bytes": "4922301"
},
{
"name": "Shell",
"bytes": "423422"
},
{
"name": "Smarty",
"bytes": "571"
},
{
"name": "VimL",
"bytes": "6045"
}
],
"symlink_target": ""
}
|
"""
@author: moloch
Copyright 2013
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from threading import Lock
from builtins import object
class Singleton(object):
"""Thread safe singleton"""
def __init__(self, decorated):
self._decorated = decorated
self._instance_lock = Lock()
def instance(self):
"""
Returns the singleton instance. Upon its first call, it creates a
new instance of the decorated class and calls its `__init__` method.
On all subsequent calls, the already created instance is returned.
"""
if not hasattr(self, "_instance"):
with self._instance_lock:
if not hasattr(self, "_instance"):
self._instance = self._decorated()
return self._instance
def __call__(self):
raise TypeError("Singletons must be accessed through the `instance` method.")
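# Illustrative usage of the decorator above; a minimal, self-contained demo
# that only runs when this module is executed directly. The `AppConfig` class
# is hypothetical and exists solely to show the `.instance()` access pattern.
if __name__ == "__main__":
    @Singleton
    class AppConfig(object):
        def __init__(self):
            self.settings = {}
    first = AppConfig.instance()
    second = AppConfig.instance()
    assert first is second  # the same instance is returned on every call
    try:
        AppConfig()  # direct calls are rejected by __call__
    except TypeError as error:
        print(error)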
|
{
"content_hash": "4652600ca0140b445a78aaab1d30cbe3",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 85,
"avg_line_length": 32.56818181818182,
"alnum_prop": 0.6608513607815771,
"repo_name": "moloch--/RootTheBox",
"id": "099eae2d4f4f799665cb9294246a7610ad4805b4",
"size": "1457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "libs/Singleton.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "209844"
},
{
"name": "Dockerfile",
"bytes": "1632"
},
{
"name": "HTML",
"bytes": "649097"
},
{
"name": "JavaScript",
"bytes": "361722"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "671684"
},
{
"name": "Shell",
"bytes": "3824"
}
],
"symlink_target": ""
}
|
import os
import shutil
import tempfile
import threading
import time
from unittest import TestCase
from vsqs import queue
class QueueTest(TestCase):
def setUp(self):
self.q = queue.Queue(tempfile.mkdtemp())
def tearDown(self):
self.q.close()
shutil.rmtree(self.q.path)
# reset the clock:
queue.micros = lambda: int(time.time() * 1000000)
def test_publish(self):
queue.micros = lambda: 0
self.assertListEqual([], os.listdir(self.q.path))
self.assertEqual('0', self.q.publish('foo'.encode('ascii')))
self.assertListEqual(['0'], os.listdir(self.q.path))
with open(os.path.join(self.q.path, '0')) as f:
self.assertEqual('foo', f.read())
queue.micros = lambda: 1
self.assertEqual('1', self.q.publish('bar'.encode('ascii')))
with open(os.path.join(self.q.path, '1')) as f:
self.assertEqual('bar', f.read())
self.assertSetEqual({'0', '1'}, set(os.listdir(self.q.path)))
def test_receive(self):
queue.micros = lambda: 0
self.assertEqual('0', self.q.publish('foo'.encode('ascii')))
queue.micros = lambda: 1
self.assertEqual('1', self.q.publish('bar'.encode('ascii')))
self.assertEqual(('0', 'foo'.encode('ascii')),
self.q.receive(visibility_timeout=10))
self.assertEqual(('1', 'bar'.encode('ascii')),
self.q.receive(visibility_timeout=10))
def test_delete(self):
m_id = self.q.publish('foo'.encode('ascii'))
self.q.delete(m_id)
self.assertEqual((None, None),
self.q.receive(visibility_timeout=10, timeout=0.1))
def test_requeue(self):
queue.micros = lambda: 0
self.q.publish('foo'.encode('ascii'))
self.assertEqual(('0', 'foo'.encode('ascii')),
self.q.receive(visibility_timeout=1))
# pretend a second has passed:
queue.micros = lambda: 1000000
# message should have been requeued:
self.assertEqual(('0', 'foo'.encode('ascii')),
self.q.receive(visibility_timeout=1))
def test_notify(self):
"""Wake up receivers when a new message gets published."""
queue.micros = lambda: 0
# have a 2nd thread publish a message while we're blocked waiting
def run():
time.sleep(.1)
self.q.publish('foo'.encode('ascii'))
t = threading.Thread(target=run)
t.start()
self.assertEqual(('0', 'foo'.encode('ascii')),
self.q.receive(visibility_timeout=1))
t.join()
def test_capacity(self):
m1 = 'foo'.encode('ascii')
m2 = 'bar'.encode('ascii')
# fill up the queue
self.q.capacity = 1
self.q.publish(m1)
        # assert that publishing another message times out and raises while the queue is full:
self.assertRaises(queue.QueueFullException,
self.q.publish, m2, timeout=.1)
# have a 2nd thread block on publishing a second message
t = threading.Thread(target=lambda: self.q.publish(m2))
t.start()
# make sure the publisher is blocked before freeing up capacity
time.sleep(.1)
mid, data = self.q.receive()
self.assertEqual(m1, data)
self.q.delete(mid)
t.join()
mid, data = self.q.receive()
self.assertEqual(m2, data)
self.q.delete(mid)
def test_size(self):
self.assertEqual(0, self.q.size())
self.q.publish('foo'.encode('ascii'))
self.assertEqual(1, self.q.size())
self.q.publish('foo'.encode('ascii'))
self.assertEqual(2, self.q.size())
m_id, data = self.q.receive(visibility_timeout=10)
self.assertEqual(2, self.q.size())
self.q.delete(m_id)
self.assertEqual(1, self.q.size())
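# Illustrative usage sketch, not part of the original test module. It reuses
# only the queue API exercised by the tests above (publish/receive/delete) and
# runs solely when this file is executed directly with the vsqs package
# installed.
if __name__ == '__main__':
    demo_dir = tempfile.mkdtemp()
    q = queue.Queue(demo_dir)
    q.publish('hello'.encode('ascii'))
    message_id, payload = q.receive(visibility_timeout=10)
    print(payload)  # the published payload comes back untouched
    q.delete(message_id)  # acknowledge it so it is not requeued
    q.close()
    shutil.rmtree(demo_dir)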
|
{
"content_hash": "56c01c9836ffac7f64fea3ab6e9b2b94",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 76,
"avg_line_length": 33.144067796610166,
"alnum_prop": 0.5724878547686014,
"repo_name": "erikvanzijst/vsqs",
"id": "e9a9bfca62c2e9fca60ea09edc0d772910ca475c",
"size": "3911",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_queue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15791"
}
],
"symlink_target": ""
}
|
"""
Tools for memoization of function results.
"""
from functools import wraps
from six import iteritems
from weakref import WeakKeyDictionary
class lazyval(object):
"""
Decorator that marks that an attribute should not be computed until
needed, and that the value should be memoized.
Example
-------
>>> from zipline.utils.memoize import lazyval
>>> class C(object):
... def __init__(self):
... self.count = 0
... @lazyval
... def val(self):
... self.count += 1
... return "val"
...
>>> c = C()
>>> c.count
0
>>> c.val, c.count
('val', 1)
>>> c.val, c.count
('val', 1)
"""
def __init__(self, get):
self._get = get
self._cache = WeakKeyDictionary()
def __get__(self, instance, owner):
if instance is None:
return self
try:
return self._cache[instance]
except KeyError:
self._cache[instance] = val = self._get(instance)
return val
def remember_last(f):
"""
Decorator that remembers the last computed value of a function and doesn't
recompute it when called with the same inputs multiple times.
Parameters
----------
f : The function to be memoized. All arguments to f should be hashable.
Example
-------
>>> counter = 0
>>> @remember_last
... def foo(x):
... global counter
... counter += 1
... return x, counter
>>> foo(1)
(1, 1)
>>> foo(1)
(1, 1)
>>> foo(0)
(0, 2)
>>> foo(1)
(1, 3)
Notes
-----
    This decorator is equivalent to `lru_cache(1)` in Python 3, but with fewer
    bells and whistles for handling things like thread safety.  If we ever
decide we need such bells and whistles, we should just make functools32 a
dependency.
"""
# This needs to be a mutable data structure so we can change it from inside
# the function. In pure Python 3, we'd use the nonlocal keyword for this.
_previous = [None, None]
KEY, VALUE = 0, 1
_kwd_mark = object()
@wraps(f)
def memoized_f(*args, **kwds):
# Hashing logic taken from functools32.lru_cache.
key = args
if kwds:
            # _kwd_mark must be wrapped in a tuple before concatenation.
            key += (_kwd_mark,) + tuple(sorted(iteritems(kwds)))
key_hash = hash(key)
if key_hash != _previous[KEY]:
_previous[VALUE] = f(*args, **kwds)
_previous[KEY] = key_hash
return _previous[VALUE]
return memoized_f
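# Illustrative check, not part of the original module: exercises the
# keyword-argument path of `remember_last`, which relies on `_kwd_mark` being
# wrapped in a tuple above. The `scaled` function is hypothetical.
if __name__ == '__main__':
    @remember_last
    def scaled(x, factor=1):
        return x * factor
    assert scaled(2, factor=3) == 6
    assert scaled(2, factor=3) == 6  # identical call, served from the cache
    assert scaled(2, factor=4) == 8  # different kwargs, recomputed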
|
{
"content_hash": "fa88c1f9d9b041d1f0ddf2bfab0087c8",
"timestamp": "",
"source": "github",
"line_count": 101,
"max_line_length": 79,
"avg_line_length": 25.14851485148515,
"alnum_prop": 0.5547244094488188,
"repo_name": "sketchytechky/zipline",
"id": "354649bf3850e6b6a61ffae6c027b0425ec2b8de",
"size": "2540",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "zipline/utils/memoize.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Python",
"bytes": "1400087"
},
{
"name": "Shell",
"bytes": "4065"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('report_ia', '0014_kern_gevaar_potentieel_risico'),
]
operations = [
migrations.RunSQL([
"insert into report_ia_kern_gevaar (naam) values ('onzorgvuldige/geen cockpitcheck');",
"insert into report_ia_kern_gevaar (naam) values ('onzorgvuldige/geen daily check');",
"insert into report_ia_kern_gevaar (naam) values ('circuleren op operationeel terrein');",
"insert into report_ia_kern_gevaar (naam) values ('gebrekkig onderhoud LVT');",
"insert into report_ia_kern_gevaar (naam) values ('staat van de infrastructuur');",
"insert into report_ia_kern_gevaar (naam) values ('gebrek aan toezicht door bevoegde persoon');",
"insert into report_ia_kern_gevaar (naam) values ('onervarenheid piloot in');",
"insert into report_ia_kern_gevaar (naam) values ('onzorgvuldige aankoppelprocedure lier');",
"insert into report_ia_kern_gevaar (naam) values ('onzorgvuldige aankoppelprocedure sleper');",
"insert into report_ia_kern_gevaar (naam) values ('luchtruim');",
]
)
]
|
{
"content_hash": "039925b8111828f1bbac8697a29b16f8",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 109,
"avg_line_length": 47.074074074074076,
"alnum_prop": 0.6435877261998426,
"repo_name": "jefke-glider/gliding",
"id": "f9310e2d1d2d74c4bf16b8823cea305f21d424b3",
"size": "1344",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ato/report_ia/migrations/0015_auto_20170111_1236.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "32404"
},
{
"name": "PostScript",
"bytes": "63220"
},
{
"name": "Python",
"bytes": "140918"
}
],
"symlink_target": ""
}
|
from rflint.common import ResourceRule
class Issue30(ResourceRule):
def configure(self, value):
self.value = value
def apply(self,resource):
message = "the configured value is %s" % self.value
self.report(resource, message, 0, 0)
|
{
"content_hash": "a8118887d461a3c9763f4979b83eef10",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 59,
"avg_line_length": 29.333333333333332,
"alnum_prop": 0.6704545454545454,
"repo_name": "boakley/robotframework-lint",
"id": "2e86552ac6805a9b8c017f7cdd58a8f6189fa53f",
"size": "264",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_data/acceptance/issue-30.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "63830"
},
{
"name": "RobotFramework",
"bytes": "65448"
}
],
"symlink_target": ""
}
|
config = {
# The hostname of your server.
"host": "tf.example.com",
# The user for the app to login as.
"user": "srcds",
# The FTP password.
"ftp_pass": "password",
    # The method to log in with.
# SSH is more secure and more powerful.
# FTP is supported on nearly all hosts.
"connection_agent": "ssh",
# The root directory for the server.
"server_root": "/home/srcds/tf2/tf"
}
|
{
"content_hash": "e2fd06d7aa49ad2b1ff81592c87036d8",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 43,
"avg_line_length": 30.071428571428573,
"alnum_prop": 0.6080760095011877,
"repo_name": "SunDwarf/smadmintools",
"id": "52b824a81cdf056a53c7f248fc51dbe21a5809d4",
"size": "421",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "24697"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
content = open(filename).read()
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", content))
return metadata['version']
setup(
name='Mopidy-Headspring-Web',
version=get_version('mopidy_headspring_web/__init__.py'),
url='https://github.com/bardoloi/Mopidy-Headspring-Web',
license='Apache License, Version 2.0',
author='Headspring Labs',
author_email='vishal.bardoloi@headspring.com',
description='Mopidy Headspring Web extension',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 0.19',
],
entry_points={
'mopidy.ext': [
'headspring_web = mopidy_headspring_web:MusicBoxExtension',
],
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
|
{
"content_hash": "c5c51064ff795bf604bac5254c3a2541",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 71,
"avg_line_length": 30.46511627906977,
"alnum_prop": 0.6213740458015267,
"repo_name": "HeadspringLabs/Mopidy-Headspring-Web",
"id": "c2e65a0853fdba4d5e66d6ec9df858b2507b3e94",
"size": "1310",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "524658"
},
{
"name": "HTML",
"bytes": "23913"
},
{
"name": "JavaScript",
"bytes": "259983"
},
{
"name": "Python",
"bytes": "4302"
}
],
"symlink_target": ""
}
|
__author__ = 'pfCao'
import subprocess
import os
def run_adb_cmd(cmd):
try:
args = ["adb"]
args.extend(cmd)
process = subprocess.Popen(args, stdin=subprocess.PIPE, stdout=subprocess.PIPE)
ret = process.communicate()
except Exception, ex:
print ex
return None
return ret
def main():
module_name = "target"
remote_path = "/data/local/tmp"
run_adb_cmd(["push", os.path.join(os.getcwd(), "libtarget.so"), remote_path])
os.chdir("..")
project_path = os.getcwd()
bin_path = os.path.join(project_path, "libs", "armeabi", module_name)
run_adb_cmd(["push", bin_path, remote_path])
run_adb_cmd(["shell", "chmod", "777", remote_path+"/"+module_name])
if __name__ == '__main__':
main()
# print "Oscilab - Step Sequencer_1.2.3_apk-dl.com.apk".encode("hex")
|
{
"content_hash": "1efefdd065689bdfe67c842c99ee2dc9",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 87,
"avg_line_length": 25.4,
"alnum_prop": 0.5714285714285714,
"repo_name": "ManyFace/AndroidInjection",
"id": "1462808d8b68d3d363f0c87bcfd017c5ee9e222f",
"size": "930",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exercise2-injectMethod/target/jni/push.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "39018"
},
{
"name": "Makefile",
"bytes": "1812"
},
{
"name": "Python",
"bytes": "5866"
}
],
"symlink_target": ""
}
|
import sys
import operator
import time
from itertools import chain
from datetime import datetime
if sys.version < "3":
from itertools import imap as map, ifilter as filter
from py4j.protocol import Py4JJavaError
from pyspark import RDD
from pyspark.storagelevel import StorageLevel
from pyspark.streaming.util import rddToFileName, TransformFunction
from pyspark.rdd import portable_hash
from pyspark.resultiterable import ResultIterable
__all__ = ["DStream"]
class DStream(object):
"""
A Discretized Stream (DStream), the basic abstraction in Spark Streaming,
is a continuous sequence of RDDs (of the same type) representing a
continuous stream of data (see L{RDD} in the Spark core documentation
for more details on RDDs).
DStreams can either be created from live data (such as, data from TCP
sockets, Kafka, Flume, etc.) using a L{StreamingContext} or it can be
generated by transforming existing DStreams using operations such as
`map`, `window` and `reduceByKeyAndWindow`. While a Spark Streaming
    program is running, each DStream periodically generates an RDD, either
from live data or by transforming the RDD generated by a parent DStream.
DStreams internally is characterized by a few basic properties:
- A list of other DStreams that the DStream depends on
- A time interval at which the DStream generates an RDD
- A function that is used to generate an RDD after each time interval
"""
def __init__(self, jdstream, ssc, jrdd_deserializer):
self._jdstream = jdstream
self._ssc = ssc
self._sc = ssc._sc
self._jrdd_deserializer = jrdd_deserializer
self.is_cached = False
self.is_checkpointed = False
def context(self):
"""
Return the StreamingContext associated with this DStream
"""
return self._ssc
def count(self):
"""
Return a new DStream in which each RDD has a single element
generated by counting each RDD of this DStream.
"""
return self.mapPartitions(lambda i: [sum(1 for _ in i)]).reduce(operator.add)
def filter(self, f):
"""
Return a new DStream containing only the elements that satisfy predicate.
"""
def func(iterator):
return filter(f, iterator)
return self.mapPartitions(func, True)
def flatMap(self, f, preservesPartitioning=False):
"""
Return a new DStream by applying a function to all elements of
this DStream, and then flattening the results
"""
def func(s, iterator):
return chain.from_iterable(map(f, iterator))
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def map(self, f, preservesPartitioning=False):
"""
Return a new DStream by applying a function to each element of DStream.
"""
def func(iterator):
return map(f, iterator)
return self.mapPartitions(func, preservesPartitioning)
def mapPartitions(self, f, preservesPartitioning=False):
"""
Return a new DStream in which each RDD is generated by applying
mapPartitions() to each RDDs of this DStream.
"""
def func(s, iterator):
return f(iterator)
return self.mapPartitionsWithIndex(func, preservesPartitioning)
def mapPartitionsWithIndex(self, f, preservesPartitioning=False):
"""
Return a new DStream in which each RDD is generated by applying
mapPartitionsWithIndex() to each RDDs of this DStream.
"""
return self.transform(lambda rdd: rdd.mapPartitionsWithIndex(f, preservesPartitioning))
def reduce(self, func):
"""
Return a new DStream in which each RDD has a single element
generated by reducing each RDD of this DStream.
"""
return self.map(lambda x: (None, x)).reduceByKey(func, 1).map(lambda x: x[1])
def reduceByKey(self, func, numPartitions=None):
"""
Return a new DStream by applying reduceByKey to each RDD.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.combineByKey(lambda x: x, func, func, numPartitions)
def combineByKey(self, createCombiner, mergeValue, mergeCombiners,
numPartitions=None):
"""
Return a new DStream by applying combineByKey to each RDD.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
def func(rdd):
return rdd.combineByKey(createCombiner, mergeValue, mergeCombiners, numPartitions)
return self.transform(func)
def partitionBy(self, numPartitions, partitionFunc=portable_hash):
"""
Return a copy of the DStream in which each RDD are partitioned
using the specified partitioner.
"""
return self.transform(lambda rdd: rdd.partitionBy(numPartitions, partitionFunc))
def foreachRDD(self, func):
"""
Apply a function to each RDD in this DStream.
"""
if func.__code__.co_argcount == 1:
old_func = func
func = lambda t, rdd: old_func(rdd)
jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer)
api = self._ssc._jvm.PythonDStream
api.callForeachRDD(self._jdstream, jfunc)
def pprint(self, num=10):
"""
Print the first num elements of each RDD generated in this DStream.
        @param num: the number of elements to print from the start of each RDD.
"""
def takeAndPrint(time, rdd):
taken = rdd.take(num + 1)
print("-------------------------------------------")
print("Time: %s" % time)
print("-------------------------------------------")
for record in taken[:num]:
print(record)
if len(taken) > num:
print("...")
print("")
self.foreachRDD(takeAndPrint)
def mapValues(self, f):
"""
Return a new DStream by applying a map function to the value of
each key-value pairs in this DStream without changing the key.
"""
map_values_fn = lambda kv: (kv[0], f(kv[1]))
return self.map(map_values_fn, preservesPartitioning=True)
def flatMapValues(self, f):
"""
Return a new DStream by applying a flatmap function to the value
of each key-value pairs in this DStream without changing the key.
"""
flat_map_fn = lambda kv: ((kv[0], x) for x in f(kv[1]))
return self.flatMap(flat_map_fn, preservesPartitioning=True)
def glom(self):
"""
Return a new DStream in which RDD is generated by applying glom()
to RDD of this DStream.
"""
def func(iterator):
yield list(iterator)
return self.mapPartitions(func)
def cache(self):
"""
Persist the RDDs of this DStream with the default storage level
(C{MEMORY_ONLY}).
"""
self.is_cached = True
self.persist(StorageLevel.MEMORY_ONLY)
return self
def persist(self, storageLevel):
"""
Persist the RDDs of this DStream with the given storage level
"""
self.is_cached = True
javaStorageLevel = self._sc._getJavaStorageLevel(storageLevel)
self._jdstream.persist(javaStorageLevel)
return self
def checkpoint(self, interval):
"""
Enable periodic checkpointing of RDDs of this DStream
@param interval: time in seconds, after each period of that, generated
RDD will be checkpointed
"""
self.is_checkpointed = True
self._jdstream.checkpoint(self._ssc._jduration(interval))
return self
def groupByKey(self, numPartitions=None):
"""
Return a new DStream by applying groupByKey on each RDD.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transform(lambda rdd: rdd.groupByKey(numPartitions))
def countByValue(self):
"""
Return a new DStream in which each RDD contains the counts of each
distinct value in each RDD of this DStream.
"""
return self.map(lambda x: (x, 1)).reduceByKey(lambda x, y: x+y)
def saveAsTextFiles(self, prefix, suffix=None):
"""
        Save each RDD in this DStream as a text file, using the string
representation of elements.
"""
def saveAsTextFile(t, rdd):
path = rddToFileName(prefix, suffix, t)
try:
rdd.saveAsTextFile(path)
except Py4JJavaError as e:
# after recovered from checkpointing, the foreachRDD may
# be called twice
if 'FileAlreadyExistsException' not in str(e):
raise
return self.foreachRDD(saveAsTextFile)
# TODO: uncomment this until we have ssc.pickleFileStream()
# def saveAsPickleFiles(self, prefix, suffix=None):
# """
    #     Save each RDD in this DStream as a binary file, the elements are
# serialized by pickle.
# """
# def saveAsPickleFile(t, rdd):
# path = rddToFileName(prefix, suffix, t)
# try:
# rdd.saveAsPickleFile(path)
# except Py4JJavaError as e:
# # after recovered from checkpointing, the foreachRDD may
# # be called twice
# if 'FileAlreadyExistsException' not in str(e):
# raise
# return self.foreachRDD(saveAsPickleFile)
def transform(self, func):
"""
Return a new DStream in which each RDD is generated by applying a function
on each RDD of this DStream.
`func` can have one argument of `rdd`, or have two arguments of
(`time`, `rdd`)
"""
if func.__code__.co_argcount == 1:
oldfunc = func
func = lambda t, rdd: oldfunc(rdd)
assert func.__code__.co_argcount == 2, "func should take one or two arguments"
return TransformedDStream(self, func)
def transformWith(self, func, other, keepSerializer=False):
"""
Return a new DStream in which each RDD is generated by applying a function
on each RDD of this DStream and 'other' DStream.
`func` can have two arguments of (`rdd_a`, `rdd_b`) or have three
arguments of (`time`, `rdd_a`, `rdd_b`)
"""
if func.__code__.co_argcount == 2:
oldfunc = func
func = lambda t, a, b: oldfunc(a, b)
assert func.__code__.co_argcount == 3, "func should take two or three arguments"
jfunc = TransformFunction(self._sc, func, self._jrdd_deserializer, other._jrdd_deserializer)
dstream = self._sc._jvm.PythonTransformed2DStream(self._jdstream.dstream(),
other._jdstream.dstream(), jfunc)
jrdd_serializer = self._jrdd_deserializer if keepSerializer else self._sc.serializer
return DStream(dstream.asJavaDStream(), self._ssc, jrdd_serializer)
def repartition(self, numPartitions):
"""
Return a new DStream with an increased or decreased level of parallelism.
"""
return self.transform(lambda rdd: rdd.repartition(numPartitions))
@property
def _slideDuration(self):
"""
Return the slideDuration in seconds of this DStream
"""
return self._jdstream.dstream().slideDuration().milliseconds() / 1000.0
def union(self, other):
"""
Return a new DStream by unifying data of another DStream with this DStream.
@param other: Another DStream having the same interval (i.e., slideDuration)
as this DStream.
"""
if self._slideDuration != other._slideDuration:
raise ValueError("the two DStream should have same slide duration")
return self.transformWith(lambda a, b: a.union(b), other, True)
def cogroup(self, other, numPartitions=None):
"""
Return a new DStream by applying 'cogroup' between RDDs of this
DStream and `other` DStream.
Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transformWith(lambda a, b: a.cogroup(b, numPartitions), other)
def join(self, other, numPartitions=None):
"""
Return a new DStream by applying 'join' between RDDs of this DStream and
`other` DStream.
Hash partitioning is used to generate the RDDs with `numPartitions`
partitions.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transformWith(lambda a, b: a.join(b, numPartitions), other)
def leftOuterJoin(self, other, numPartitions=None):
"""
Return a new DStream by applying 'left outer join' between RDDs of this DStream and
`other` DStream.
Hash partitioning is used to generate the RDDs with `numPartitions`
partitions.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transformWith(lambda a, b: a.leftOuterJoin(b, numPartitions), other)
def rightOuterJoin(self, other, numPartitions=None):
"""
Return a new DStream by applying 'right outer join' between RDDs of this DStream and
`other` DStream.
Hash partitioning is used to generate the RDDs with `numPartitions`
partitions.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transformWith(lambda a, b: a.rightOuterJoin(b, numPartitions), other)
def fullOuterJoin(self, other, numPartitions=None):
"""
Return a new DStream by applying 'full outer join' between RDDs of this DStream and
`other` DStream.
Hash partitioning is used to generate the RDDs with `numPartitions`
partitions.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
return self.transformWith(lambda a, b: a.fullOuterJoin(b, numPartitions), other)
def _jtime(self, timestamp):
""" Convert datetime or unix_timestamp into Time
"""
if isinstance(timestamp, datetime):
timestamp = time.mktime(timestamp.timetuple())
return self._sc._jvm.Time(long(timestamp * 1000))
def slice(self, begin, end):
"""
Return all the RDDs between 'begin' to 'end' (both included)
`begin`, `end` could be datetime.datetime() or unix_timestamp
"""
jrdds = self._jdstream.slice(self._jtime(begin), self._jtime(end))
return [RDD(jrdd, self._sc, self._jrdd_deserializer) for jrdd in jrdds]
def _validate_window_param(self, window, slide):
duration = self._jdstream.dstream().slideDuration().milliseconds()
if int(window * 1000) % duration != 0:
raise ValueError("windowDuration must be multiple of the slide duration (%d ms)"
% duration)
if slide and int(slide * 1000) % duration != 0:
raise ValueError("slideDuration must be multiple of the slide duration (%d ms)"
% duration)
def window(self, windowDuration, slideDuration=None):
"""
        Return a new DStream in which each RDD contains all the elements seen in a
sliding window of time over this DStream.
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
"""
self._validate_window_param(windowDuration, slideDuration)
d = self._ssc._jduration(windowDuration)
if slideDuration is None:
return DStream(self._jdstream.window(d), self._ssc, self._jrdd_deserializer)
s = self._ssc._jduration(slideDuration)
return DStream(self._jdstream.window(d, s), self._ssc, self._jrdd_deserializer)
def reduceByWindow(self, reduceFunc, invReduceFunc, windowDuration, slideDuration):
"""
Return a new DStream in which each RDD has a single element generated by reducing all
elements in a sliding window over this DStream.
if `invReduceFunc` is not None, the reduction is done incrementally
using the old window's reduced value :
1. reduce the new values that entered the window (e.g., adding new counts)
2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
        This is more efficient than when `invReduceFunc` is None.
@param reduceFunc: associative and commutative reduce function
@param invReduceFunc: inverse reduce function of `reduceFunc`; such that for all y,
and invertible x:
`invReduceFunc(reduceFunc(x, y), x) = y`
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
"""
keyed = self.map(lambda x: (1, x))
reduced = keyed.reduceByKeyAndWindow(reduceFunc, invReduceFunc,
windowDuration, slideDuration, 1)
return reduced.map(lambda kv: kv[1])
def countByWindow(self, windowDuration, slideDuration):
"""
Return a new DStream in which each RDD has a single element generated
by counting the number of elements in a window over this DStream.
windowDuration and slideDuration are as defined in the window() operation.
This is equivalent to window(windowDuration, slideDuration).count(),
but will be more efficient if window is large.
"""
return self.map(lambda x: 1).reduceByWindow(operator.add, operator.sub,
windowDuration, slideDuration)
def countByValueAndWindow(self, windowDuration, slideDuration, numPartitions=None):
"""
Return a new DStream in which each RDD contains the count of distinct elements in
RDDs in a sliding window over this DStream.
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
@param numPartitions: number of partitions of each RDD in the new DStream.
"""
keyed = self.map(lambda x: (x, 1))
counted = keyed.reduceByKeyAndWindow(operator.add, operator.sub,
windowDuration, slideDuration, numPartitions)
return counted.filter(lambda kv: kv[1] > 0)
def groupByKeyAndWindow(self, windowDuration, slideDuration, numPartitions=None):
"""
Return a new DStream by applying `groupByKey` over a sliding window.
Similar to `DStream.groupByKey()`, but applies it over a sliding window.
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
@param numPartitions: Number of partitions of each RDD in the new DStream.
"""
ls = self.mapValues(lambda x: [x])
grouped = ls.reduceByKeyAndWindow(lambda a, b: a.extend(b) or a, lambda a, b: a[len(b):],
windowDuration, slideDuration, numPartitions)
return grouped.mapValues(ResultIterable)
def reduceByKeyAndWindow(self, func, invFunc, windowDuration, slideDuration=None,
numPartitions=None, filterFunc=None):
"""
Return a new DStream by applying incremental `reduceByKey` over a sliding window.
        The reduced value over a new window is calculated using the old window's reduced value:
1. reduce the new values that entered the window (e.g., adding new counts)
2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
        `invFunc` can be None; in that case it will reduce all the RDDs in the window,
        which could be slower than having `invFunc`.
@param func: associative and commutative reduce function
@param invFunc: inverse function of `reduceFunc`
@param windowDuration: width of the window; must be a multiple of this DStream's
batching interval
@param slideDuration: sliding interval of the window (i.e., the interval after which
the new DStream will generate RDDs); must be a multiple of this
DStream's batching interval
@param numPartitions: number of partitions of each RDD in the new DStream.
@param filterFunc: function to filter expired key-value pairs;
only pairs that satisfy the function are retained
set this to null if you do not want to filter
"""
self._validate_window_param(windowDuration, slideDuration)
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
reduced = self.reduceByKey(func, numPartitions)
if invFunc:
def reduceFunc(t, a, b):
b = b.reduceByKey(func, numPartitions)
r = a.union(b).reduceByKey(func, numPartitions) if a else b
if filterFunc:
r = r.filter(filterFunc)
return r
def invReduceFunc(t, a, b):
b = b.reduceByKey(func, numPartitions)
joined = a.leftOuterJoin(b, numPartitions)
return joined.mapValues(lambda kv: invFunc(kv[0], kv[1])
if kv[1] is not None else kv[0])
jreduceFunc = TransformFunction(self._sc, reduceFunc, reduced._jrdd_deserializer)
jinvReduceFunc = TransformFunction(self._sc, invReduceFunc, reduced._jrdd_deserializer)
if slideDuration is None:
slideDuration = self._slideDuration
dstream = self._sc._jvm.PythonReducedWindowedDStream(
reduced._jdstream.dstream(),
jreduceFunc, jinvReduceFunc,
self._ssc._jduration(windowDuration),
self._ssc._jduration(slideDuration))
return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)
else:
return reduced.window(windowDuration, slideDuration).reduceByKey(func, numPartitions)
def updateStateByKey(self, updateFunc, numPartitions=None, initialRDD=None):
"""
Return a new "state" DStream where the state for each key is updated by applying
the given function on the previous state of the key and the new values of the key.
@param updateFunc: State update function. If this function returns None, then
corresponding state key-value pair will be eliminated.
"""
if numPartitions is None:
numPartitions = self._sc.defaultParallelism
if initialRDD and not isinstance(initialRDD, RDD):
initialRDD = self._sc.parallelize(initialRDD)
def reduceFunc(t, a, b):
if a is None:
g = b.groupByKey(numPartitions).mapValues(lambda vs: (list(vs), None))
else:
g = a.cogroup(b.partitionBy(numPartitions), numPartitions)
g = g.mapValues(lambda ab: (list(ab[1]), list(ab[0])[0] if len(ab[0]) else None))
state = g.mapValues(lambda vs_s: updateFunc(vs_s[0], vs_s[1]))
return state.filter(lambda k_v: k_v[1] is not None)
jreduceFunc = TransformFunction(self._sc, reduceFunc,
self._sc.serializer, self._jrdd_deserializer)
if initialRDD:
initialRDD = initialRDD._reserialize(self._jrdd_deserializer)
dstream = self._sc._jvm.PythonStateDStream(self._jdstream.dstream(), jreduceFunc,
initialRDD._jrdd)
else:
dstream = self._sc._jvm.PythonStateDStream(self._jdstream.dstream(), jreduceFunc)
return DStream(dstream.asJavaDStream(), self._ssc, self._sc.serializer)
class TransformedDStream(DStream):
"""
TransformedDStream is a DStream generated by an Python function
transforming each RDD of a DStream to another RDDs.
Multiple continuous transformations of DStream can be combined into
one transformation.
"""
def __init__(self, prev, func):
self._ssc = prev._ssc
self._sc = self._ssc._sc
self._jrdd_deserializer = self._sc.serializer
self.is_cached = False
self.is_checkpointed = False
self._jdstream_val = None
        # Using type() to avoid folding the functions and compacting the DStreams which is
        # not strictly an object of TransformedDStream.
        # This also avoids a bug in KafkaTransformedDStream when calling offsetRanges().
if (type(prev) is TransformedDStream and
not prev.is_cached and not prev.is_checkpointed):
prev_func = prev.func
self.func = lambda t, rdd: func(t, prev_func(t, rdd))
self.prev = prev.prev
else:
self.prev = prev
self.func = func
@property
def _jdstream(self):
if self._jdstream_val is not None:
return self._jdstream_val
jfunc = TransformFunction(self._sc, self.func, self.prev._jrdd_deserializer)
dstream = self._sc._jvm.PythonTransformedDStream(self.prev._jdstream.dstream(), jfunc)
self._jdstream_val = dstream.asJavaDStream()
return self._jdstream_val
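# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original module: how the windowed and
# stateful operations documented above are typically combined. `ssc` is an
# assumed StreamingContext and `lines` an assumed input DStream; the sketch is
# kept as comments because it needs a running Spark Streaming job (with
# checkpointing enabled for the stateful and inverse-reduce paths) to execute.
#
#   pairs = lines.flatMap(lambda line: line.split(" ")) \
#                .map(lambda word: (word, 1))
#
#   # Incremental windowed counts: add new batches, "inverse reduce" old ones.
#   windowed = pairs.reduceByKeyAndWindow(
#       func=lambda a, b: a + b,
#       invFunc=lambda a, b: a - b,
#       windowDuration=30, slideDuration=10)
#
#   # Running totals across the whole stream via updateStateByKey.
#   def update(new_values, last_sum):
#       return sum(new_values) + (last_sum or 0)
#   totals = pairs.updateStateByKey(update)
#
#   windowed.pprint()
#   totals.pprint()
# ---------------------------------------------------------------------------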
|
{
"content_hash": "c4b97fcb59a3c1207498705ece533039",
"timestamp": "",
"source": "github",
"line_count": 629,
"max_line_length": 100,
"avg_line_length": 43.969793322734496,
"alnum_prop": 0.6023068300972629,
"repo_name": "wangyixiaohuihui/spark2-annotation",
"id": "1e6dfde031d5819ad5fa849b3f752bc89c1dffa7",
"size": "28457",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/pyspark/streaming/dstream.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "33815"
},
{
"name": "Batchfile",
"bytes": "24294"
},
{
"name": "C",
"bytes": "1542"
},
{
"name": "CSS",
"bytes": "23957"
},
{
"name": "HTML",
"bytes": "10012"
},
{
"name": "HiveQL",
"bytes": "1828674"
},
{
"name": "Java",
"bytes": "3737029"
},
{
"name": "JavaScript",
"bytes": "143063"
},
{
"name": "Makefile",
"bytes": "7980"
},
{
"name": "PLpgSQL",
"bytes": "9666"
},
{
"name": "PowerShell",
"bytes": "3751"
},
{
"name": "Python",
"bytes": "2248750"
},
{
"name": "R",
"bytes": "1027534"
},
{
"name": "Roff",
"bytes": "14420"
},
{
"name": "SQLPL",
"bytes": "3603"
},
{
"name": "Scala",
"bytes": "22897473"
},
{
"name": "Shell",
"bytes": "156941"
},
{
"name": "Thrift",
"bytes": "33665"
},
{
"name": "q",
"bytes": "147332"
}
],
"symlink_target": ""
}
|
import copy
import math
import os
import unittest
from telemetry import decorators
from telemetry.core import util
from telemetry.internal.util import external_modules
try:
np = external_modules.ImportRequiredModule('numpy')
cv2 = external_modules.ImportRequiredModule('cv2')
except (ImportError, NotImplementedError) as err:
pass
else:
# pylint: disable=protected-access
class ScreenFinderTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(ScreenFinderTest, self).__init__(*args, **kwargs)
      # Import modules with dependencies that may not be present in the test
      # setup, so that importing this unit test doesn't cause the test runner
      # to raise an exception.
from telemetry.internal.image_processing import fake_frame_generator
from telemetry.internal.image_processing import screen_finder
from telemetry.internal.image_processing import video_file_frame_generator
self.FakeFrameGenerator = fake_frame_generator.FakeFrameGenerator
self.VideoFileFrameGenerator = \
video_file_frame_generator.VideoFileFrameGenerator
self.ScreenFinder = screen_finder.ScreenFinder
def _GetScreenFinder(self, video_filename):
if not video_filename:
fg = self.FakeFrameGenerator()
else:
vid = os.path.join(util.GetUnittestDataDir(), video_filename)
fg = self.VideoFileFrameGenerator(vid)
return self.ScreenFinder(fg)
# https://github.com/catapult-project/catapult/issues/3510
@decorators.Disabled('mac', 'linux')
@decorators.Isolated
def testBasicFunctionality(self):
def CheckCorners(corners, expected):
for i in xrange(len(corners)):
for j in xrange(len(corners[i])):
self.assertAlmostEqual(corners[i][j], expected[i][j], delta=1.1)
expected = [[314, 60], [168, 58], [162, 274], [311, 276]]
sf = self._GetScreenFinder('screen_3_frames.mov')
self.assertTrue(sf.HasNext())
screen, corners = sf.GetNext()
CheckCorners(corners, expected)
self.assertIsNotNone(screen)
height, width = screen.shape[:2]
self.assertAlmostEqual(height, 226, delta=2)
self.assertAlmostEqual(width, 156, delta=2)
self.assertTrue(sf.HasNext())
screen, corners = sf.GetNext()
CheckCorners(corners, expected)
self.assertIsNotNone(screen)
height1, width1 = screen.shape[:2]
self.assertEqual(width, width1)
self.assertEqual(height, height1)
self.assertTrue(sf.HasNext())
screen, corners = sf.GetNext()
CheckCorners(corners, expected)
self.assertIsNotNone(screen)
height2, width2 = screen.shape[:2]
self.assertEqual(width, width2)
self.assertEqual(height, height2)
self.assertFalse(sf.HasNext())
error = ''
try:
sf.GetNext()
except RuntimeError as e:
error = str(e)
self.assertEqual(error, 'No more frames available.')
def testHasMovedFast(self):
sf = self._GetScreenFinder(None)
prev_corners = np.asfarray(([1000, 1000], [0, 1000], [0, 0], [1000, 0]))
self.assertFalse(sf._HasMovedFast(prev_corners, prev_corners))
not_moved = copy.deepcopy(prev_corners)
not_moved[0][1] += 1
not_moved[1][1] += 1
not_moved[3][0] += 0.9
self.assertFalse(sf._HasMovedFast(not_moved, prev_corners))
moved = copy.deepcopy(prev_corners)
moved[0][1] += math.sqrt(0.5)
moved[0][0] += math.sqrt(0.5)
moved[1][1] += 2.1
self.assertTrue(sf._HasMovedFast(moved, prev_corners))
def testPointConnectsToCorners(self):
sf = self._GetScreenFinder(None)
line1 = np.asfarray(((0, 0, 1, 0)))
line2 = np.asfarray(((0, 0, 0, 1)))
point = np.asfarray((0, 0))
point_info = (point, line1, line2)
corners = np.asfarray(((1, 0), (0, 1)))
self.assertFalse(sf._PointConnectsToCorners(corners, point_info, 1))
corners = np.append(corners, (100, 1))
corners = np.append(corners, (1, 100))
corners = corners.reshape(-1, 2)
self.assertTrue(sf._PointConnectsToCorners(corners, point_info, 2))
self.assertFalse(sf._PointConnectsToCorners(corners, point_info, 0.5))
corners = np.append(corners, (100, 0))
corners = np.append(corners, (0, 100))
corners = corners.reshape(-1, 2)
self.assertTrue(sf._PointConnectsToCorners(corners, point_info, 0))
def testFindIntersections(self):
def _BuildResult(point, line1, line2):
return [point, np.asfarray(line1).tolist(), np.asfarray(line2).tolist()]
def _IntersectionResultsToList(results):
result_list = []
for result in results:
point, line1, line2 = result
p = np.round(point).tolist()
l1 = np.round(line1).tolist()
l2 = np.round(line2).tolist()
result_list.append([p, l1, l2])
return result_list
sf = self._GetScreenFinder(None)
expected = []
lines = []
# Box with corners at (0, 0), (1000, 0), (0, 1000), (1000, 1000)
lines.append(np.asfarray(((0, 1001, 0, -1))))
lines.append(np.asfarray(((-1, 0, 1001, 0))))
lines.append(np.asfarray(((1000, 1001, 1000, -1))))
lines.append(np.asfarray(((-1, 1000, 1001, 1000))))
expected.append(_BuildResult([0, 0], lines[0], lines[1]))
expected.append(_BuildResult([0, 1000], lines[0], lines[3]))
expected.append(_BuildResult([1000, 0], lines[1], lines[2]))
expected.append(_BuildResult([1000, 1000], lines[2], lines[3]))
# crosses 2 lines at 45 degrees.
lines.append(np.asfarray(((0, 500, 500, 0))))
expected.append(_BuildResult([0, 500], lines[0], lines[4]))
expected.append(_BuildResult([500, 0], lines[1], lines[4]))
# crosses 1 line at > 45 degrees, 1 line at < 45 degrees.
lines.append(np.asfarray(((0, 400, 600, 0))))
expected.append(_BuildResult([0, 400], lines[0], lines[5]))
# Test without previous corner data, all intersections should be found.
results = sf._FindIntersections(lines)
result_list = _IntersectionResultsToList(results)
for e in expected:
self.assertIn(e, result_list)
self.assertEqual(len(expected), len(result_list))
# Now introduce previous corners, but also reset conditions. No
# intersections should be lost.
corners = ((1000, 1000), (0, 1000), (0, 0), (1000, 0))
sf._prev_corners = np.asfarray(corners, np.float32)
sf._lost_corner_frames = sf.RESET_AFTER_N_BAD_FRAMES + 1
results = sf._FindIntersections(lines)
result_list = _IntersectionResultsToList(results)
for e in expected:
self.assertIn(e, result_list)
self.assertEqual(len(expected), len(result_list))
# Remove reset conditions, so intersections not near corners will be lost.
sf._lost_corner_frames = sf.RESET_AFTER_N_BAD_FRAMES
# First 4 intersections are the ones at the old corner locations.
expected = expected[0:4]
results = sf._FindIntersections(lines)
result_list = _IntersectionResultsToList(results)
for e in expected:
self.assertIn(e, result_list)
self.assertEqual(len(expected), len(result_list))
def testPointIsCloseToPreviousCorners(self):
sf = self._GetScreenFinder(None)
corners = ((1000, 1000), (0, 1000), (0, 0), (1000, 0))
sf._prev_corners = np.asfarray(corners, np.float32)
dist = math.sqrt(sf.MAX_INTERFRAME_MOTION)
sidedist1 = math.sqrt(sf.MAX_INTERFRAME_MOTION) / math.sqrt(2) - (1e-13)
sidedist2 = math.sqrt(sf.MAX_INTERFRAME_MOTION) / math.sqrt(2) + (1e-13)
point1 = (corners[3][0] + dist, corners[3][1])
self.assertTrue(sf._PointIsCloseToPreviousCorners(point1))
point2 = (corners[3][0] + sidedist1, corners[3][1] + sidedist1)
self.assertTrue(sf._PointIsCloseToPreviousCorners(point2))
point3 = (corners[1][0] + sidedist2, corners[1][1] + sidedist2)
self.assertFalse(sf._PointIsCloseToPreviousCorners(point3))
def testLooksLikeCorner(self):
# TODO: Probably easier to just do end to end tests.
pass
def testCornerData(self):
cd = self.ScreenFinder.CornerData('a', 'b', 'c', 'd', 'e')
self.assertEqual(cd.corner_index, 'a')
self.assertEqual(cd.corner_location, 'b')
self.assertEqual(cd.brightness_score, 'c')
self.assertEqual(cd.line1, 'd')
self.assertEqual(cd.line2, 'e')
cd_list = []
cd_list.append(self.ScreenFinder.CornerData(0, None, None, None, None))
cd_list.append(self.ScreenFinder.CornerData(3, None, None, None, None))
cd_list.append(self.ScreenFinder.CornerData(1, None, None, None, None))
cd_list.append(self.ScreenFinder.CornerData(2, None, None, None, None))
cd_list.sort()
for i in range(len(cd_list)):
self.assertEqual(i, cd_list[i].corner_index)
def testFindCorners(self):
# TODO: Probably easier to just do end to end tests.
pass
def testDeDupCorners(self):
sf = self._GetScreenFinder(None)
data = []
lines = []
lines.append(np.asfarray((0, 1001, 0, -1)))
lines.append(np.asfarray((-1, 0, 1001, 0)))
lines.append(np.asfarray((1000, 1001, 1000, -1)))
lines.append(np.asfarray((-1, 1000, 1001, 1000)))
lines.append(np.asfarray((0, 10, 10, 0)))
lines.append(np.asfarray((-1, 1001, 1001, 1001)))
corners = np.asfarray(((1000, 1000), (0, 1000), (0, 0),
(1000, 0), (0, 10), (10, 0), (1000, 1001)))
data.append(self.ScreenFinder.CornerData(2, corners[2], 100,
lines[0], lines[1]))
data.append(self.ScreenFinder.CornerData(1, corners[1], 100,
lines[0], lines[3]))
data.append(self.ScreenFinder.CornerData(3, corners[3], 100,
lines[1], lines[2]))
data.append(self.ScreenFinder.CornerData(0, corners[0], 100,
lines[2], lines[3]))
data.append(self.ScreenFinder.CornerData(2, corners[4], 120,
lines[0], lines[4]))
data.append(self.ScreenFinder.CornerData(2, corners[5], 110,
lines[1], lines[4]))
data.append(self.ScreenFinder.CornerData(0, corners[6], 110,
lines[2], lines[5]))
dedup = copy.copy(data)
# Tests 2 non-duplicate corners, 1 corner with connected and unconnected
# corners, and 1 corner with two connected corners.
sf._DeDupCorners(dedup, corners)
self.assertEqual(len(dedup), 4)
self.assertIn(data[0], dedup)
self.assertIn(data[1], dedup)
self.assertIn(data[2], dedup)
self.assertIn(data[6], dedup)
# Same test, but this time the corner with connected and unconnected
# corners now only contains unconnected corners.
del data[0]
corners = np.delete(corners, 2, axis=0)
dedup2 = copy.copy(data)
sf._DeDupCorners(dedup2, corners)
self.assertEqual(len(dedup2), 4)
self.assertIn(data[3], dedup2)
self.assertIn(data[0], dedup2)
self.assertIn(data[1], dedup2)
self.assertIn(data[5], dedup2)
def testFindExactCorners(self):
sf = self._GetScreenFinder(None)
img = np.zeros((3, 3), np.uint8)
img[1][0] = 255
img[0][1] = 255
img[1][2] = 255
img[2][1] = 255
sf._frame_edges = img
corners = np.asfarray([(1, 1), (1, 1), (1, 1), (1, 1)])
expected = np.asfarray([(2, 0), (0, 0), (0, 2), (2, 2)])
ret = sf._FindExactCorners(corners)
np.testing.assert_equal(ret, expected)
img2 = np.zeros((3, 3), np.uint8)
img2[1][0] = 255
img2[1][1] = 255
img2[2][2] = 255
img2[2][1] = 255
sf._frame_edges = img2
expected2 = [(2, 1), (0, 1), (0, 2), (2, 2)]
ret2 = sf._FindExactCorners(corners)
np.testing.assert_equal(ret2, expected2)
def testSmoothCorners(self):
sf = self._GetScreenFinder(None)
corners = [[10, 10], [10, 10], [10, 10], [10, 10]]
ret = sf._SmoothCorners(corners).tolist()
self.assertListEqual(ret, corners)
corners = [[0, 0], [0, 0], [0, 0], [0, 0]]
expected = [[5, 5], [5, 5], [5, 5], [5, 5]]
ret = sf._SmoothCorners(corners).tolist()
self.assertListEqual(ret, expected)
expected = [[2.5, 2.5], [2.5, 2.5], [2.5, 2.5], [2.5, 2.5]]
ret = sf._SmoothCorners(corners).tolist()
self.assertListEqual(ret, expected)
def testGetTransform(self):
sf = self._GetScreenFinder(None)
corners = np.array([[100, 1000], [0, 1000], [0, 0], [100, 0]], np.float32)
transform, w, h = sf._GetTransform(corners, 1)
transform = np.round(transform, 2)
expected = [[1., 0., 1.], [-0., -1., 1001.], [0., -0., 1.]]
self.assertListEqual(transform.tolist(), expected)
self.assertEqual(w, 102)
self.assertEqual(h, 1002)
corners = np.array([(200, 2000), (0, 2000), (0, 0), (200, 0)], np.float32)
transform, w, h = sf._GetTransform(corners, 5)
transform = np.round(transform, 2)
expected = [[0.5, 0.0, 5.0], [-0.0, -0.5, 1005.0], [-0.0, 0.0, 1.0]]
self.assertListEqual(transform.tolist(), expected)
self.assertEqual(w, 110)
self.assertEqual(h, 1010)
def testNewScreenLocation(self):
sf = self._GetScreenFinder(None)
corners_2 = np.asfarray([[np.nan, np.nan], [0, 1000], [np.nan, np.nan],
[1000, 0]])
corners_3 = np.asfarray([[1000, 1000], [0, 1000], [np.nan, np.nan],
[1000, 0]])
corners_4 = np.asfarray([[1000, 1000], [0, 1000], [0, 0], [1000, 0]])
lines = []
# Box with corners at (0, 0), (1000, 0), (0, 1000), (1000, 1000)
lines.append(np.asfarray(((0, 1001, 0, -1))))
lines.append(np.asfarray(((-1, 0, 1001, 0))))
lines.append(np.asfarray(((1000, 1001, 1000, -1))))
lines.append(np.asfarray(((-1, 1000, 1001, 1000))))
# Additional intersections near a corner.
lines.append(np.asfarray(((0, 3, 7, 0))))
lines.append(np.asfarray(((0, 4, 6, 0))))
intersections = sf._FindIntersections(lines)
failed = False
try:
sf._NewScreenLocation(corners_3, 1, intersections)
except self.ScreenFinder.ScreenNotFoundError:
failed = True
self.assertTrue(failed)
sf._lost_corner_frames = 10
sf._lost_corners = [True, True, True, True]
ret = sf._NewScreenLocation(corners_4, 0, intersections)
np.testing.assert_equal(ret, corners_4)
self.assertListEqual(sf._lost_corners, [False, False, False, False])
self.assertEqual(sf._lost_corner_frames, 0)
sf._prev_corners = corners_4
ret = sf._NewScreenLocation(corners_3, 1, intersections)
ret = np.round(ret)
np.testing.assert_equal(ret, corners_4)
self.assertListEqual(sf._lost_corners, [False, False, True, False])
self.assertEqual(sf._lost_corner_frames, 1)
sf._prev_corners = np.asfarray([(1000, 1000), (0, 1000),
(0, 3), (1000, 0)])
ret = sf._NewScreenLocation(corners_3, 1, intersections)
ret = np.round(ret)
np.testing.assert_equal(ret, corners_4)
self.assertListEqual(sf._lost_corners, [False, False, True, False])
self.assertEqual(sf._lost_corner_frames, 2)
ret = sf._NewScreenLocation(corners_2, 2, intersections)
ret = np.round(ret)
expected = [[1000, 1000], [0, 1000], [0, 3], [1000, 0]]
np.testing.assert_equal(ret, expected)
self.assertListEqual(sf._lost_corners, [True, False, True, False])
self.assertEqual(sf._lost_corner_frames, 3)
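# --- Illustrative sketch, not part of the original test file ---
# The expectations in testSmoothCorners above (10 -> 5 -> 2.5 when the new
# corner estimates are all zero) are consistent with a running average that
# blends each new estimate equally with the previous smoothed value; on the
# very first call there is no previous value, so the input comes back
# unchanged. The helper below is a hypothetical, stateless restatement of
# that behaviour for clarity only; the real _SmoothCorners is stateful and
# may weight the terms differently.
def _example_smooth_corners(prev_corners, new_corners):
    prev = np.asfarray(prev_corners)
    new = np.asfarray(new_corners)
    return (prev + new) / 2.0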
|
{
"content_hash": "6a6e229a924cd3a246a2235930b679f5",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 80,
"avg_line_length": 43.06521739130435,
"alnum_prop": 0.6096037354871278,
"repo_name": "catapult-project/catapult-csm",
"id": "d8c294fbcf66c79abde858b868f5c45d949dbeb5",
"size": "16011",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "telemetry/telemetry/internal/image_processing/screen_finder_unittest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4902"
},
{
"name": "C++",
"bytes": "43728"
},
{
"name": "CSS",
"bytes": "24873"
},
{
"name": "Go",
"bytes": "80325"
},
{
"name": "HTML",
"bytes": "11817766"
},
{
"name": "JavaScript",
"bytes": "518002"
},
{
"name": "Makefile",
"bytes": "1588"
},
{
"name": "Python",
"bytes": "6207634"
},
{
"name": "Shell",
"bytes": "2558"
}
],
"symlink_target": ""
}
|
"""
.. module: security_monkey.tests.auditors.gcp.gcs.test_bucket
:platform: Unix
.. version:: $$VERSION$$
.. moduleauthor:: Tom Melendez <supertom@google.com> @supertom
"""
from security_monkey.tests import SecurityMonkeyTestCase
ACL_LIST = [
{'role': 'OWNER', 'entity': 'project-editors-2094195755359'},
{'role': 'READER', 'entity': 'project-viewers-2094195755359'},
{'role': 'WRITER', 'entity': 'project-writer-2094195755359'}
]
ACL_LIST_TWO_OWNERS = [
{'role': 'OWNER', 'entity': 'project-editors-2094195755359'},
{'role': 'READER', 'entity': 'project-viewers-2094195755359'},
{'role': 'OWNER', 'entity': 'project-editors-2094195755359'}
]
ACL_LIST_ALLUSERS = [
{'role': 'OWNER', 'entity': 'project-editors-2094195755359'},
{'role': 'READER', 'entity': 'allUsers'},
{'role': 'OWNER', 'entity': 'project-editors-2094195755359'}
]
class BucketTestCase(SecurityMonkeyTestCase):
def test__acl_allusers_exists(self):
from security_monkey.auditors.gcp.gcs.bucket import GCSBucketAuditor
auditor = GCSBucketAuditor(accounts=['unittest'])
actual = auditor._acl_allusers_exists(ACL_LIST)
self.assertFalse(actual)
actual = auditor._acl_allusers_exists(ACL_LIST_ALLUSERS)
self.assertTrue(actual)
def test__acl_max_owners(self):
from security_monkey.auditors.gcp.gcs.bucket import GCSBucketAuditor
auditor = GCSBucketAuditor(accounts=['unittest'])
# NOTE: the config value below controls the maximum-owner check, so ensure
# it is set to 1 for this test
auditor.gcp_config.MAX_OWNERS_PER_BUCKET = 1
actual = auditor._acl_max_owners(ACL_LIST)
self.assertFalse(actual)
actual = auditor._acl_max_owners(ACL_LIST_TWO_OWNERS)
self.assertTrue(actual)
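# --- Illustrative sketch, not part of the original test file ---
# The assertions above imply that _acl_max_owners flags an ACL once the
# number of OWNER entries exceeds the configured MAX_OWNERS_PER_BUCKET
# (forced to 1 here). A hypothetical stand-alone version of that check,
# shown only to make the test data easier to read:
def _example_too_many_owners(acl_list, max_owners=1):
    owner_count = sum(1 for entry in acl_list if entry.get('role') == 'OWNER')
    return owner_count > max_owners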
|
{
"content_hash": "06422c6830e4452221f9ed153d884c9c",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 76,
"avg_line_length": 36.69387755102041,
"alnum_prop": 0.6657397107897665,
"repo_name": "Netflix/security_monkey",
"id": "5dc02be173ff3202afb2c730aafe29f5cefbbf5e",
"size": "2414",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "security_monkey/tests/auditors/gcp/gcs/test_bucket.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "22837"
},
{
"name": "Dart",
"bytes": "130852"
},
{
"name": "Dockerfile",
"bytes": "3841"
},
{
"name": "HTML",
"bytes": "120266"
},
{
"name": "JavaScript",
"bytes": "13728"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1578684"
},
{
"name": "Shell",
"bytes": "30939"
}
],
"symlink_target": ""
}
|
import mock
from cinderclient.v1 import client as cinder_client
from oslotest import mockpatch
from cloudferrylib.os.storage import cinder_storage
from cloudferrylib.utils import utils
from tests import test
FAKE_CONFIG = utils.ext_dict(
cloud=utils.ext_dict({'user': 'fake_user',
'password': 'fake_password',
'tenant': 'fake_tenant',
'host': '1.1.1.1',
'auth_url': 'http://1.1.1.1:35357/v2.0/'}),
migrate=utils.ext_dict({'speed_limit': '10MB',
'retry': '7',
'time_wait': 5,
'keep_volume_storage': False,
'keep_volume_snapshots': False}),
mysql=utils.ext_dict({'host': '1.1.1.1'}),
storage=utils.ext_dict({'backend': 'ceph',
'rbd_pool': 'volumes',
'volume_name_template': 'volume-',
'host': '1.1.1.1'}))
class CinderStorageTestCase(test.TestCase):
def setUp(self):
super(CinderStorageTestCase, self).setUp()
self.mock_client = mock.Mock()
self.cs_patch = mockpatch.PatchObject(cinder_client, 'Client',
new=self.mock_client)
self.useFixture(self.cs_patch)
self.identity_mock = mock.Mock()
self.compute_mock = mock.Mock()
self.fake_cloud = mock.Mock()
self.fake_cloud.position = 'src'
self.fake_cloud.resources = dict(identity=self.identity_mock,
compute=self.compute_mock)
with mock.patch(
'cloudferrylib.os.storage.cinder_storage.mysql_connector'):
self.cinder_client = cinder_storage.CinderStorage(FAKE_CONFIG,
self.fake_cloud)
self.fake_volume_0 = mock.Mock()
self.fake_volume_1 = mock.Mock()
self.mock_client().volumes.get.return_value = self.fake_volume_0
def test_get_cinder_client(self):
# Reset the mock so that calls to self.mock_client are verified only within this test method
self.mock_client.reset_mock()
client = self.cinder_client.get_client(FAKE_CONFIG)
self.mock_client.assert_called_once_with('fake_user', 'fake_password',
'fake_tenant',
'http://1.1.1.1:35357/v2.0/')
self.assertEqual(self.mock_client(), client)
def test_get_volumes_list(self):
fake_volume_list = [self.fake_volume_0, self.fake_volume_1]
self.mock_client().volumes.list.return_value = fake_volume_list
volumes_list = self.cinder_client.get_volumes_list(search_opts=dict())
self.mock_client().volumes.list.assert_called_once_with(True, dict(all_tenants=True))
self.assertEqual(volumes_list, fake_volume_list)
def test_create_volume(self):
self.mock_client().volumes.create.return_value = self.fake_volume_0
volume = self.cinder_client.create_volume(100500, name='fake')
self.mock_client().volumes.create.assert_called_once_with(100500,
name='fake')
self.assertEqual(self.fake_volume_0, volume)
def test_get_volume_by_id(self):
volume = self.cinder_client.get_volume_by_id('fake_id')
self.mock_client().volumes.get.assert_called_once_with('fake_id')
self.assertEqual(self.fake_volume_0, volume)
def test_delete_volume(self):
self.cinder_client.delete_volume('fake_id')
self.mock_client().volumes.get.assert_called_once_with('fake_id')
self.mock_client().volumes.delete.assert_called_once_with(
self.fake_volume_0)
def test_update_volume(self):
self.cinder_client.update_volume('fake_id', name='new_fake_name')
self.mock_client().volumes.get.assert_called_once_with('fake_id')
self.mock_client().volumes.update.assert_called_once_with(
self.fake_volume_0, name='new_fake_name')
def test_attach_volume(self):
self.mock_client().volumes.attach.return_value = (
'fake_response', 'fake_body')
response, body = self.cinder_client.attach_volume('fake_vol_id',
'fake_instance_id',
'/fake/mountpoint')
test_args = {'instance_uuid': 'fake_instance_id',
'mountpoint': '/fake/mountpoint',
'mode': 'rw'}
self.mock_client().volumes.get.assert_called_once_with('fake_vol_id')
self.mock_client().volumes.attach.assert_called_once_with(
self.fake_volume_0, **test_args)
self.assertEqual(('fake_response', 'fake_body'), (response, body))
def test_detach_volume(self):
self.mock_client().volumes.detach.return_value = (
'fake_response', 'fake_body')
response, body = self.cinder_client.detach_volume('fake_vl_id')
self.mock_client().volumes.detach.assert_called_once_with('fake_vl_id')
self.assertEqual(('fake_response', 'fake_body'), (response, body))
def test_upload_volume_to_image(self):
image = {'os-volume_upload_image': {'image_id': "fake_body"}}
self.mock_client().volumes.upload_to_image.return_value = (
'fake_response', image)
response, body = self.cinder_client.upload_volume_to_image(
'fake_vol_id', True, 'fake_image_name', 'fake_cont_format',
'fake_disk_format')
test_args = {'volume': self.fake_volume_0,
'container_format': 'fake_cont_format',
'force': True,
'image_name': 'fake_image_name',
'disk_format': 'fake_disk_format'}
self.mock_client().volumes.get.assert_called_once_with('fake_vol_id')
self.mock_client().volumes.upload_to_image.assert_called_once_with(
**test_args)
self.assertEqual(('fake_response', 'fake_body'), (response, body))
def test_read_info(self):
temp = self.cinder_client.get_volumes_list
self.cinder_client.get_volumes_list = mock.Mock()
vol1 = mock.Mock(id="id1",
size='size',
display_name='display_name',
display_description='display_description',
availability_zone='availability_zone',
volume_type='volume_type',
attachments=[{'device': 'device'}],
bootable='bootable')
self.cinder_client.get_volumes_list.return_value = [vol1]
res = self.cinder_client.read_info(id="id1")
self.assertIn('volumes', res)
self.assertEqual(1, len(res['volumes']))
self.assertEqual(vol1.id, res['volumes']['id1']['volume']['id'])
self.cinder_client.get_volumes_list = temp
def test_deploy(self):
vol = {'volume': {'size': 'size1',
'display_name': 'display_name1',
'display_description': 'display_description1',
'volume_type': 'volume_type1',
'availability_zone': 'availability_zone1'},
'meta': {'image': {'id': 'image_id1'}}}
info = {'volumes': {'id1': vol}}
create_volume = mock.Mock()
vol_return = mock.Mock(id="id2")
create_volume.return_value = vol_return
wait_for_status = mock.Mock()
finish = mock.Mock()
attach_vol_to_instance = mock.Mock()
self.cinder_client.create_volume = create_volume
self.cinder_client.wait_for_status = wait_for_status
self.cinder_client.finish = finish
self.cinder_client.attach_volume_to_instance = attach_vol_to_instance
res = self.cinder_client.deploy(info)
self.assertIn(vol_return.id, res)
def test_get_volume_path_iscsi(self):
fake_mysql_return = ('fake_ip:fake_port,3 iqn.2010-10.org.openstack:'
'volume-fake_volume_id fake_lun',)
self.fake_cloud.mysql_connector.execute().fetchone.return_value = (
fake_mysql_return)
volume_path = self.cinder_client.get_volume_path_iscsi('fake_vol_id')
expected_volume_path = (
'/dev/disk/by-path/ip-fake_ip:fake_port-iscsi-iqn.2010-10.org.'
'openstack:volume-fake_volume_id-lun-fake_lun')
self.assertEqual(expected_volume_path, volume_path)
self.fake_cloud.mysql_connector.execute.assert_called_with(
"SELECT provider_location FROM volumes WHERE id='fake_vol_id';")
def test_get_volume_path_iscsi_error(self):
fake_mysql_return = None
self.fake_cloud.mysql_connector.execute.return_value = (
fake_mysql_return)
expected_msg = ('There is no such raw in Cinder DB with the specified '
'volume_id=fake_vol_id')
try:
self.cinder_client.get_volume_path_iscsi('fake_vol_id')
except Exception as e:
self.assertEqual(expected_msg, e.message)
self.fake_cloud.mysql_connector.execute.assert_called_once_with(
"SELECT provider_location FROM volumes WHERE id='fake_vol_id';")
self.assertRaises(Exception,
self.cinder_client.get_volume_path_iscsi,
'fake_vol_id')
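# --- Illustrative sketch, not part of the original test file ---
# test_get_volume_path_iscsi above implies that provider_location is stored
# as "<ip:port>,<target> <iqn> <lun>" and rearranged into a by-path device
# name. A hypothetical parser that reproduces the expected string:
def _example_iscsi_path(provider_location):
    portal, iqn, lun = provider_location.split()
    ip_port = portal.split(',')[0]
    return '/dev/disk/by-path/ip-%s-iscsi-%s-lun-%s' % (ip_port, iqn, lun)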
|
{
"content_hash": "043616f21ad4456b0ad55adb58db5ddf",
"timestamp": "",
"source": "github",
"line_count": 224,
"max_line_length": 93,
"avg_line_length": 42.857142857142854,
"alnum_prop": 0.56375,
"repo_name": "Settis/CloudFerry",
"id": "399eb5c145b466f326146626fde5074df56558a7",
"size": "10230",
"binary": false,
"copies": "2",
"ref": "refs/heads/devel",
"path": "tests/cloudferrylib/os/storage/test_cinder_storage.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "362999"
}
],
"symlink_target": ""
}
|
"""
Bitbake "Fetch" implementation for osc (Opensuse build service client).
Based on the svn "Fetch" implementation.
"""
import os
import sys
import bb
from bb import data
from bb.fetch import Fetch
from bb.fetch import FetchError
from bb.fetch import MissingParameterError
from bb.fetch import runfetchcmd
class Osc(Fetch):
"""Class to fetch a module or modules from Opensuse build server
repositories."""
def supports(self, url, ud, d):
"""
Check to see if a given url can be fetched with osc.
"""
return ud.type in ['osc']
def localpath(self, url, ud, d):
if not "module" in ud.parm:
raise MissingParameterError("osc method needs a 'module' parameter.")
ud.module = ud.parm["module"]
# Create paths to osc checkouts
relpath = ud.path
if relpath.startswith('/'):
# Remove leading slash as os.path.join can't cope
relpath = relpath[1:]
ud.pkgdir = os.path.join(data.expand('${OSCDIR}', d), ud.host)
ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module)
if 'rev' in ud.parm:
ud.revision = ud.parm['rev']
else:
pv = data.getVar("PV", d, 0)
rev = Fetch.srcrev_internal_helper(ud, d)
if rev and rev != True:
ud.revision = rev
else:
ud.revision = ""
ud.localfile = data.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision), d)
return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
def _buildosccommand(self, ud, d, command):
"""
Build up an osc commandline based on ud
command is "fetch", "update", "info"
"""
basecmd = data.expand('${FETCHCMD_osc}', d)
proto = "ocs"
if "proto" in ud.parm:
proto = ud.parm["proto"]
options = []
config = "-c %s" % self.generate_config(ud, d)
if ud.revision:
options.append("-r %s" % ud.revision)
coroot = ud.path
if coroot.startswith('/'):
# Remove leading slash as os.path.join can't cope
coroot = coroot[1:]
if command is "fetch":
osccmd = "%s %s co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options))
elif command is "update":
osccmd = "%s %s up %s" % (basecmd, config, " ".join(options))
else:
raise FetchError("Invalid osc command %s" % command)
return osccmd
def go(self, loc, ud, d):
"""
Fetch url
"""
# Try to use the tarball stash
if Fetch.try_mirror(d, ud.localfile):
bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping osc checkout." % ud.localpath)
return
bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'")
if os.access(os.path.join(data.expand('${OSCDIR}', d), ud.path, ud.module), os.R_OK):
oscupdatecmd = self._buildosccommand(ud, d, "update")
bb.msg.note(1, bb.msg.domain.Fetcher, "Update "+ loc)
# update sources there
os.chdir(ud.moddir)
bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % oscupdatecmd)
runfetchcmd(oscupdatecmd, d)
else:
oscfetchcmd = self._buildosccommand(ud, d, "fetch")
bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
# check out sources there
bb.mkdirhier(ud.pkgdir)
os.chdir(ud.pkgdir)
bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % oscfetchcmd)
runfetchcmd(oscfetchcmd, d)
os.chdir(os.path.join(ud.pkgdir + ud.path))
# tar them up to a defined filename
try:
runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d)
except:
t, v, tb = sys.exc_info()
try:
os.unlink(ud.localpath)
except OSError:
pass
raise t, v, tb
def supports_srcrev(self):
return False
def generate_config(self, ud, d):
"""
Generate a .oscrc to be used for this run.
"""
config_path = "%s/oscrc" % data.expand('${OSCDIR}', d)
if (os.path.exists(config_path)):
os.remove(config_path)
f = open(config_path, 'w')
f.write("[general]\n")
f.write("apisrv = %s\n" % ud.host)
f.write("scheme = http\n")
f.write("su-wrapper = su -c\n")
f.write("build-root = %s\n" % data.expand('${WORKDIR}', d))
f.write("urllist = http://moblin-obs.jf.intel.com:8888/build/%(project)s/%(repository)s/%(buildarch)s/:full/%(name)s.rpm\n")
f.write("extra-pkgs = gzip\n")
f.write("\n")
f.write("[%s]\n" % ud.host)
f.write("user = %s\n" % ud.parm["user"])
f.write("pass = %s\n" % ud.parm["pswd"])
f.close()
return config_path
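# --- Illustrative note, not part of the original fetcher ---
# For a hypothetical URL such as osc://obs.example.com/project;module=mypkg;rev=5
# the "fetch" branch of _buildosccommand above would yield a command roughly
# like (assuming FETCHCMD_osc expands to the plain "osc" binary):
#   osc -c ${OSCDIR}/oscrc co project/mypkg -r 5
# while the "update" branch runs "osc ... up -r 5" inside the existing
# checkout directory before the result is tarred up into DL_DIR.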
|
{
"content_hash": "c59d6998dd63009b2346a2ddc561f8ae",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 132,
"avg_line_length": 33.47712418300654,
"alnum_prop": 0.5384615384615384,
"repo_name": "nlebedenco/mini2440",
"id": "2c34caf6c9a61651d2ad5a414ac63f12e99e167f",
"size": "5211",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bitbake/lib/bb/fetch/osc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3327735"
},
{
"name": "C++",
"bytes": "181067"
},
{
"name": "D",
"bytes": "3438"
},
{
"name": "JavaScript",
"bytes": "3401"
},
{
"name": "PHP",
"bytes": "9144"
},
{
"name": "Perl",
"bytes": "662"
},
{
"name": "Python",
"bytes": "656793"
},
{
"name": "Shell",
"bytes": "202067"
},
{
"name": "Vim script",
"bytes": "4556"
}
],
"symlink_target": ""
}
|
from myhdl import Signal, intbv
def _input_serdes(serial_in_p, serial_in_n, data):
mod_insts = []
return mod_insts
def _output_serdes(data, serial_out_p, serial_out_n):
mod_insts = []
return mod_insts
def io_serdes(serial_in_p, serial_in_n,
serial_out_p, serial_out_n):
""" Vendor specific IO SERDES
"""
mod_insts = []
return mod_insts
|
{
"content_hash": "b9c27563a9a305cbebb36d864406a456",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 53,
"avg_line_length": 17.043478260869566,
"alnum_prop": 0.6071428571428571,
"repo_name": "cfelton/parallella_elink",
"id": "7b57c9c78c2e51076c2900b729d934e6740090d5",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "elink/vendor/_io_serdes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "538"
},
{
"name": "Python",
"bytes": "93376"
},
{
"name": "Shell",
"bytes": "197"
},
{
"name": "Verilog",
"bytes": "15487"
}
],
"symlink_target": ""
}
|
from django.core.urlresolvers import NoReverseMatch # noqa
from django.core.urlresolvers import reverse
from django.http import HttpResponse # noqa
from django.template import defaultfilters as filters
from django.utils import html
from django.utils.http import urlencode
from django.utils import safestring
from django.utils.translation import pgettext_lazy
from django.utils.translation import string_concat # noqa
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from horizon import exceptions
from horizon import messages
from horizon import tables
from openstack_dashboard import api
from openstack_dashboard.api import cinder
from openstack_dashboard import policy
DELETABLE_STATES = ("available", "error", "error_extending")
class VolumePolicyTargetMixin(policy.PolicyTargetMixin):
policy_target_attrs = (("project_id", 'os-vol-tenant-attr:tenant_id'),)
class LaunchVolume(tables.LinkAction):
name = "launch_volume"
verbose_name = _("Launch as Instance")
url = "horizon:project:instances:launch"
classes = ("ajax-modal", "btn-launch")
icon = "cloud-upload"
policy_rules = (("compute", "compute:create"),)
def get_link_url(self, datum):
base_url = reverse(self.url)
vol_id = "%s:vol" % self.table.get_object_id(datum)
params = urlencode({"source_type": "volume_id",
"source_id": vol_id})
return "?".join([base_url, params])
def allowed(self, request, volume=None):
if getattr(volume, 'bootable', '') == 'true':
return volume.status == "available"
return False
class DeleteVolume(VolumePolicyTargetMixin, tables.DeleteAction):
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Delete Volume",
u"Delete Volumes",
count
)
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Scheduled deletion of Volume",
u"Scheduled deletion of Volumes",
count
)
policy_rules = (("volume", "volume:delete"),)
def delete(self, request, obj_id):
cinder.volume_delete(request, obj_id)
def allowed(self, request, volume=None):
if volume:
return (volume.status in DELETABLE_STATES and
not getattr(volume, 'has_snapshot', False))
return True
class CreateVolume(tables.LinkAction):
name = "create"
verbose_name = _("Create Volume")
url = "horizon:project:volumes:volumes:create"
classes = ("ajax-modal",)
icon = "plus"
policy_rules = (("volume", "volume:create"),)
ajax = True
def __init__(self, attrs=None, **kwargs):
kwargs['preempt'] = True
super(CreateVolume, self).__init__(attrs, **kwargs)
def allowed(self, request, volume=None):
limits = api.cinder.tenant_absolute_limits(request)
gb_available = (limits.get('maxTotalVolumeGigabytes', float("inf"))
- limits.get('totalGigabytesUsed', 0))
volumes_available = (limits.get('maxTotalVolumes', float("inf"))
- limits.get('totalVolumesUsed', 0))
if gb_available <= 0 or volumes_available <= 0:
if "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
else:
self.verbose_name = _("Create Volume")
classes = [c for c in self.classes if c != "disabled"]
self.classes = classes
return True
def single(self, table, request, object_id=None):
self.allowed(request, None)
return HttpResponse(self.render())
class ExtendVolume(VolumePolicyTargetMixin, tables.LinkAction):
name = "extend"
verbose_name = _("Extend Volume")
url = "horizon:project:volumes:volumes:extend"
classes = ("ajax-modal", "btn-extend")
policy_rules = (("volume", "volume:extend"),)
def allowed(self, request, volume=None):
return volume.status == "available"
class EditAttachments(tables.LinkAction):
name = "attachments"
verbose_name = _("Manage Attachments")
url = "horizon:project:volumes:volumes:attach"
classes = ("ajax-modal",)
icon = "pencil"
def allowed(self, request, volume=None):
if volume:
project_id = getattr(volume, "os-vol-tenant-attr:tenant_id", None)
attach_allowed = \
policy.check((("compute", "compute:attach_volume"),),
request,
{"project_id": project_id})
detach_allowed = \
policy.check((("compute", "compute:detach_volume"),),
request,
{"project_id": project_id})
if attach_allowed or detach_allowed:
return volume.status in ("available", "in-use")
return False
class CreateSnapshot(VolumePolicyTargetMixin, tables.LinkAction):
name = "snapshots"
verbose_name = _("Create Snapshot")
url = "horizon:project:volumes:volumes:create_snapshot"
classes = ("ajax-modal",)
icon = "camera"
policy_rules = (("volume", "volume:create_snapshot"),)
def allowed(self, request, volume=None):
try:
limits = api.cinder.tenant_absolute_limits(request)
except Exception:
exceptions.handle(request, _('Unable to retrieve tenant limits.'))
limits = {}
snapshots_available = (limits.get('maxTotalSnapshots', float("inf"))
- limits.get('totalSnapshotsUsed', 0))
if snapshots_available <= 0 and "disabled" not in self.classes:
self.classes = [c for c in self.classes] + ['disabled']
self.verbose_name = string_concat(self.verbose_name, ' ',
_("(Quota exceeded)"))
return volume.status in ("available", "in-use")
class CreateTransfer(VolumePolicyTargetMixin, tables.LinkAction):
name = "create_transfer"
verbose_name = _("Create Transfer")
url = "horizon:project:volumes:volumes:create_transfer"
classes = ("ajax-modal",)
policy_rules = (("volume", "volume:create_transfer"),)
def allowed(self, request, volume=None):
return volume.status == "available"
class CreateBackup(VolumePolicyTargetMixin, tables.LinkAction):
name = "backups"
verbose_name = _("Create Backup")
url = "horizon:project:volumes:volumes:create_backup"
classes = ("ajax-modal",)
policy_rules = (("volume", "backup:create"),)
def allowed(self, request, volume=None):
return (cinder.volume_backup_supported(request) and
volume.status == "available")
class UploadToImage(VolumePolicyTargetMixin, tables.LinkAction):
name = "upload_to_image"
verbose_name = _("Upload to Image")
url = "horizon:project:volumes:volumes:upload_to_image"
classes = ("ajax-modal",)
icon = "cloud-upload"
policy_rules = (("volume", "volume:upload_to_image"),)
def allowed(self, request, volume=None):
has_image_service_perm = \
request.user.has_perm('openstack.services.image')
return (volume.status in ("available", "in-use") and
has_image_service_perm)
class EditVolume(VolumePolicyTargetMixin, tables.LinkAction):
name = "edit"
verbose_name = _("Edit Volume")
url = "horizon:project:volumes:volumes:update"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("volume", "volume:update"),)
def allowed(self, request, volume=None):
return volume.status in ("available", "in-use")
class RetypeVolume(VolumePolicyTargetMixin, tables.LinkAction):
name = "retype"
verbose_name = _("Change Volume Type")
url = "horizon:project:volumes:volumes:retype"
classes = ("ajax-modal",)
icon = "pencil"
policy_rules = (("volume", "volume:retype"),)
def allowed(self, request, volume=None):
return volume.status in ("available", "in-use")
class AcceptTransfer(tables.LinkAction):
name = "accept_transfer"
verbose_name = _("Accept Transfer")
url = "horizon:project:volumes:volumes:accept_transfer"
classes = ("ajax-modal",)
icon = "exchange"
policy_rules = (("volume", "volume:accept_transfer"),)
ajax = True
def single(self, table, request, object_id=None):
return HttpResponse(self.render())
class DeleteTransfer(VolumePolicyTargetMixin, tables.Action):
# This class inherits from tables.Action instead of the more obvious
# tables.DeleteAction due to the confirmation message. When the delete
# is successful, DeleteAction automatically appends the name of the
# volume to the message, e.g. "Deleted volume transfer 'volume'". But
# we are deleting the volume *transfer*, whose name is different.
name = "delete_transfer"
verbose_name = _("Cancel Transfer")
policy_rules = (("volume", "volume:delete_transfer"),)
classes = ('btn-danger',)
def allowed(self, request, volume):
return (volume.status == "awaiting-transfer" and
getattr(volume, 'transfer', None))
def single(self, table, request, volume_id):
volume = table.get_object_by_id(volume_id)
try:
cinder.transfer_delete(request, volume.transfer.id)
if volume.transfer.name:
msg = _('Successfully deleted volume transfer "%s"'
) % volume.transfer.name
else:
msg = _("Successfully deleted volume transfer")
messages.success(request, msg)
except Exception:
exceptions.handle(request, _("Unable to delete volume transfer."))
class UpdateRow(tables.Row):
ajax = True
def get_data(self, request, volume_id):
volume = cinder.volume_get(request, volume_id)
return volume
def get_size(volume):
return _("%sGB") % volume.size
def get_attachment_name(request, attachment):
server_id = attachment.get("server_id", None)
if "instance" in attachment and attachment['instance']:
name = attachment["instance"].name
else:
try:
server = api.nova.server_get(request, server_id)
name = server.name
except Exception:
name = None
exceptions.handle(request, _("Unable to retrieve "
"attachment information."))
try:
url = reverse("horizon:project:instances:detail", args=(server_id,))
instance = '<a href="%s">%s</a>' % (url, html.escape(name))
except NoReverseMatch:
instance = name
return instance
class AttachmentColumn(tables.Column):
"""Customized column class.
So it that does complex processing on the attachments
for a volume instance.
"""
def get_raw_data(self, volume):
request = self.table.request
link = _('Attached to %(instance)s on %(dev)s')
attachments = []
# Filter out "empty" attachments which the client returns...
for attachment in [att for att in volume.attachments if att]:
# When a volume is attached it may return the server_id
# without the server name...
instance = get_attachment_name(request, attachment)
vals = {"instance": instance,
"dev": html.escape(attachment.get("device", ""))}
attachments.append(link % vals)
return safestring.mark_safe(", ".join(attachments))
def get_volume_type(volume):
return volume.volume_type if volume.volume_type != "None" else None
def get_encrypted_value(volume):
if not hasattr(volume, 'encrypted') or volume.encrypted is None:
return _("-")
elif volume.encrypted is False:
return _("No")
else:
return _("Yes")
class VolumesTableBase(tables.DataTable):
STATUS_CHOICES = (
("in-use", True),
("available", True),
("creating", None),
("error", False),
("error_extending", False),
)
STATUS_DISPLAY_CHOICES = (
("available", pgettext_lazy("Current status of a Volume",
u"Available")),
("in-use", pgettext_lazy("Current status of a Volume", u"In-use")),
("error", pgettext_lazy("Current status of a Volume", u"Error")),
("creating", pgettext_lazy("Current status of a Volume",
u"Creating")),
("error_extending", pgettext_lazy("Current status of a Volume",
u"Error Extending")),
)
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:volumes:volumes:detail")
description = tables.Column("description",
verbose_name=_("Description"),
truncate=40)
size = tables.Column(get_size,
verbose_name=_("Size"),
attrs={'data-type': 'size'})
status = tables.Column("status",
verbose_name=_("Status"),
status=True,
status_choices=STATUS_CHOICES,
display_choices=STATUS_DISPLAY_CHOICES)
def get_object_display(self, obj):
return obj.name
class VolumesFilterAction(tables.FilterAction):
def filter(self, table, volumes, filter_string):
"""Naive case-insensitive search."""
q = filter_string.lower()
return [volume for volume in volumes
if q in volume.name.lower()]
class VolumesTable(VolumesTableBase):
name = tables.Column("name",
verbose_name=_("Name"),
link="horizon:project:volumes:volumes:detail")
volume_type = tables.Column(get_volume_type,
verbose_name=_("Type"))
attachments = AttachmentColumn("attachments",
verbose_name=_("Attached To"))
availability_zone = tables.Column("availability_zone",
verbose_name=_("Availability Zone"))
bootable = tables.Column('is_bootable',
verbose_name=_("Bootable"),
filters=(filters.yesno, filters.capfirst))
encryption = tables.Column(get_encrypted_value,
verbose_name=_("Encrypted"),
link="horizon:project:volumes:"
"volumes:encryption_detail")
class Meta(object):
name = "volumes"
verbose_name = _("Volumes")
status_columns = ["status"]
row_class = UpdateRow
table_actions = (CreateVolume, AcceptTransfer, DeleteVolume,
VolumesFilterAction)
row_actions = (EditVolume, ExtendVolume, LaunchVolume, EditAttachments,
CreateSnapshot, CreateBackup, RetypeVolume,
UploadToImage, CreateTransfer, DeleteTransfer,
DeleteVolume)
class DetachVolume(tables.BatchAction):
name = "detach"
classes = ('btn-danger', 'btn-detach')
policy_rules = (("compute", "compute:detach_volume"),)
@staticmethod
def action_present(count):
return ungettext_lazy(
u"Detach Volume",
u"Detach Volumes",
count
)
# This action is asynchronous.
@staticmethod
def action_past(count):
return ungettext_lazy(
u"Detaching Volume",
u"Detaching Volumes",
count
)
def action(self, request, obj_id):
attachment = self.table.get_object_by_id(obj_id)
api.nova.instance_volume_detach(request,
attachment.get('server_id', None),
obj_id)
def get_success_url(self, request):
return reverse('horizon:project:volumes:index')
class AttachedInstanceColumn(tables.Column):
"""Customized column class that does complex processing on the attachments
for a volume instance.
"""
def get_raw_data(self, attachment):
request = self.table.request
return safestring.mark_safe(get_attachment_name(request, attachment))
class AttachmentsTable(tables.DataTable):
instance = AttachedInstanceColumn(get_attachment_name,
verbose_name=_("Instance"))
device = tables.Column("device",
verbose_name=_("Device"))
def get_object_id(self, obj):
return obj['id']
def get_object_display(self, attachment):
instance_name = get_attachment_name(self.request, attachment)
vals = {"volume_name": attachment['volume_name'],
"instance_name": html.strip_tags(instance_name)}
return _("Volume %(volume_name)s on instance %(instance_name)s") % vals
def get_object_by_id(self, obj_id):
for obj in self.data:
if self.get_object_id(obj) == obj_id:
return obj
raise ValueError('No match found for the id "%s".' % obj_id)
class Meta(object):
name = "attachments"
verbose_name = _("Attachments")
table_actions = (DetachVolume,)
row_actions = (DetachVolume,)
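# --- Illustrative sketch, not part of the original module ---
# CreateVolume.allowed above disables the action once either quota is
# exhausted. The arithmetic reduces to the hypothetical helper below, with
# missing limits treated as unlimited:
def _example_quota_available(limits):
    gb_available = (limits.get('maxTotalVolumeGigabytes', float("inf"))
                    - limits.get('totalGigabytesUsed', 0))
    volumes_available = (limits.get('maxTotalVolumes', float("inf"))
                         - limits.get('totalVolumesUsed', 0))
    return gb_available > 0 and volumes_available > 0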
|
{
"content_hash": "a4bf1184c76508d8065c1baaee4b8451",
"timestamp": "",
"source": "github",
"line_count": 491,
"max_line_length": 79,
"avg_line_length": 35.87983706720978,
"alnum_prop": 0.5944258386785491,
"repo_name": "orbitfp7/horizon",
"id": "7efc3f02020819051c49cfbeebc1edad881ea352",
"size": "18222",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/volumes/volumes/tables.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "1536"
},
{
"name": "CSS",
"bytes": "70531"
},
{
"name": "HTML",
"bytes": "420092"
},
{
"name": "JavaScript",
"bytes": "277460"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4417610"
},
{
"name": "Shell",
"bytes": "18318"
}
],
"symlink_target": ""
}
|
import numpy as np
import mnist
import sys
import mlayers_minibatch as ml
import NeuralNetwork as nn
if len(sys.argv) < 5:
print("Not enough arguments provided. Try \"python ConvolutionalNeuralNetworkMiniBatch.py \'epochs\' \'minibatch size\' \'filter learn rate\' \'fully connected learn rate\'\"")
exit()
EPOCHS = int(sys.argv[1])#80*32
LEARN_RATE = float(sys.argv[4])#0.1
LEARN_RATE_CONV = float(sys.argv[3])#0.001
ml.GRADIENT_THRESHOLD = 10000
MINIBATCH_SIZE = int(sys.argv[2])#1
np.set_printoptions(threshold=np.inf, precision=4, linewidth=300)
layers = [ml.ConvolutionalLayer(28,28,1,6,5,1,0), ml.LeakyReLULayer(), ml.MaxPoolingLayer(2,2), ml.ConvolutionalLayer(12,12,6,16,5,1,0), ml.LeakyReLULayer(), ml.MaxPoolingLayer(2,2), ml.FullyConnectedLayer(16,4,4,100), ml.LeakyReLULayer(), ml.InnerLayerRevised(40,100), ml.LeakyReLULayer(), ml.InnerLayerRevised(10, 40), ml.SoftmaxLayer()]
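# Shape bookkeeping for the layer stack above (assuming the constructor
# signature ConvolutionalLayer(height, width, in_ch, out_ch, filter, stride,
# pad) and valid convolutions): 28x28x1 -> conv 5x5 -> 24x24x6 -> pool 2x2 ->
# 12x12x6 -> conv 5x5 -> 8x8x16 -> pool 2x2 -> 4x4x16 (256 values) -> fully
# connected 256 -> 100 -> 40 -> 10 -> softmax over the 10 digit classes.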
training_data, train_classifications = mnist.load_mnist('training', path = r'C:\Users\fastslash8\OneDrive\Coding\Python\Machine Learning\modular-layers', asbytes=True)
testing_data, test_classifications = mnist.load_mnist('testing', path = r'C:\Users\fastslash8\OneDrive\Coding\Python\Machine Learning\modular-layers', asbytes=True)
training_list = [np.divide(training_data[index].reshape(1,training_data.shape[1],training_data.shape[2]),255/2) - 1 for index in range(training_data.shape[0])]
testing_list = [np.divide(testing_data[index].reshape(1,testing_data.shape[1],testing_data.shape[2]),255/2) - 1 for index in range(testing_data.shape[0])]
train_classifications -= 1
test_classifications -= 1
network = nn.NeuralNetwork(layers, epochs=EPOCHS, learn_rate=LEARN_RATE, minibatch_size=MINIBATCH_SIZE)
network.set_debug_options(show_loss=True)
network.train_network(training_list, train_classifications, 10)
network.test_network(testing_list, test_classifications, 10)
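# Example invocation with made-up hyperparameters, matching the argv order
# parsed above (epochs, minibatch size, filter learn rate, fully connected
# learn rate):
#   python <script> 10 32 0.001 0.1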
|
{
"content_hash": "044f9e990c3f460c2ab712c0aede90bd",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 339,
"avg_line_length": 49.60526315789474,
"alnum_prop": 0.7575596816976128,
"repo_name": "fastslash8/modular-layers",
"id": "fa5c4c605a567837a32f56b448bb6b03ca6dbb8b",
"size": "1885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "TrainNetworkMnist.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55826"
}
],
"symlink_target": ""
}
|
class Calculator:
def power(self, n, p):
if n < 0 or p < 0:
raise Exception("n and p should be non-negative")
else:
return n**p
myCalculator=Calculator()
T=int(input())
for i in range(T):
n,p = map(int, input().split())
try:
ans=myCalculator.power(n,p)
print(ans)
except Exception as e:
print(e)
|
{
"content_hash": "49471e5785c0d1d014e0c70d0dbc55f6",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 61,
"avg_line_length": 24.5,
"alnum_prop": 0.5229591836734694,
"repo_name": "rho2/30DaysOfCode",
"id": "f5bea282a9aefbf8519bd586d89269249d583ae0",
"size": "392",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "day17.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "1255"
},
{
"name": "Lua",
"bytes": "3365"
},
{
"name": "Python",
"bytes": "8452"
},
{
"name": "Ruby",
"bytes": "121"
}
],
"symlink_target": ""
}
|
import pytest
import base64
import random
from vcx.error import ErrorCode, VcxError
from vcx.state import State
from vcx.api.connection import Connection
source_id = '123'
connection_options = '{"connection_type":"SMS","phone":"8019119191","use_public_did":true}'
details = '{"connReqId":"njjmmdg","senderAgencyDetail":{"DID":"YRuVCckY6vfZfX9kcQZe3u","endpoint":"52.38.32.107:80/agency/msg","verKey":"J8Yct6FwmarXjrE2khZesUXRVVSVczSoa9sFaGe6AD2v"},"senderDetail":{"DID":"JZho9BzVAEk8jJ1hwrrDiZ","agentKeyDlgProof":{"agentDID":"JDF8UHPBTXigvtJWeeMJzx","agentDelegatedKey":"AP5SzUaHHhF5aLmyKHB3eTqUaREGKyVttwo5T4uwEkM4","signature":"JHSvITBMZiTEhpK61EDIWjQOLnJ8iGQ3FT1nfyxNNlxSngzp1eCRKnGC/RqEWgtot9M5rmTC8QkZTN05GGavBg=="},"logoUrl":"https://robohash.org/123","name":"Evernym","verKey":"AaEDsDychoytJyzk4SuzHMeQJGCtQhQHDitaic6gtiM1"},"statusCode":"MS-101","statusMsg":"message created","targetName":"there"}'
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_create_connection():
connection = await Connection.create(source_id)
assert connection.source_id == source_id
assert connection.handle > 0
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_connection_connect():
connection = await Connection.create(source_id)
invite_details = await connection.connect(connection_options)
assert invite_details
await connection.delete()
with pytest.raises(VcxError) as e:
await connection.serialize()
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_connection_send_message():
connection = await Connection.create(source_id)
invite_details = await connection.connect(connection_options)
assert invite_details
with pytest.raises(VcxError) as e:
msg_id = await connection.send_message("msg","type","title")
assert ErrorCode.NotReady == e.value.error_code
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_connection_sign_data():
connection = await Connection.create(source_id)
invite_details = await connection.connect(connection_options)
assert invite_details
signature = await connection.sign_data(invite_details)
assert signature
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_connection_verify_signature():
connection = await Connection.create(source_id)
invite_details = await connection.connect(connection_options)
assert invite_details
signature = await connection.verify_signature(invite_details, invite_details)
assert signature
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_connection_with_invite_connect():
connection = await Connection.create_with_details(source_id, details)
invite = await connection.connect(connection_options)
assert invite
@pytest.mark.asyncio
async def test_call_to_connect_with_bad_handle():
with pytest.raises(VcxError) as e:
invalid_connection = Connection(source_id)
invalid_connection.handle = 0
await invalid_connection.connect(connection_options)
assert ErrorCode.InvalidConnectionHandle == e.value.error_code
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_call_to_connect_state_not_initialized():
with pytest.raises(VcxError) as e:
connection = await Connection.create(source_id)
await connection.connect(connection_options)
data = await connection.serialize()
data['data']['state'] = 0
data['data']['handle'] = random.randint(900, 99999)
connection2 = await Connection.deserialize(data)
await connection2.connect(connection_options)
assert ErrorCode.ConnectionError == e.value.error_code
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_serialize():
connection = await Connection.create(source_id)
await connection.connect(connection_options)
data = await connection.serialize()
assert data.get('data').get('source_id') == source_id
@pytest.mark.asyncio
async def test_serialize_with_bad_handle():
with pytest.raises(VcxError) as e:
connection = Connection(source_id)
connection.handle = 0
await connection.serialize()
assert ErrorCode.InvalidConnectionHandle == e.value.error_code
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_deserialize():
connection = await Connection.create(source_id)
await connection.connect(connection_options)
data = await connection.serialize()
connection2 = await Connection.deserialize(data)
assert connection2.source_id == data.get('source_id')
state = await connection2.get_state()
assert state == State.OfferSent
connection3 = connection
@pytest.mark.asyncio
async def test_deserialize_with_invalid_data():
with pytest.raises(VcxError) as e:
data = {'invalid': -99}
await Connection.deserialize(data)
assert ErrorCode.InvalidJson == e.value.error_code
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_serialize_deserialize_and_then_serialize():
connection = await Connection.create(source_id)
await connection.connect(connection_options)
data1 = await connection.serialize()
connection2 = await Connection.deserialize(data1)
data2 = await connection2.serialize()
assert data1 == data2
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_connection_release():
with pytest.raises(VcxError) as e:
connection = await Connection.create(source_id)
assert connection.handle > 0
connection.release()
await connection.serialize()
assert ErrorCode.InvalidConnectionHandle == e.value.error_code
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_update_state():
connection = await Connection.create(source_id)
assert await connection.update_state() == State.Initialized
await connection.connect(connection_options)
assert await connection.update_state() == State.OfferSent
@pytest.mark.asyncio
async def test_update_state_with_invalid_handle():
with pytest.raises(VcxError) as e:
connection = Connection(source_id)
connection.handle = 0
await connection.update_state()
assert ErrorCode.InvalidConnectionHandle == e.value.error_code
@pytest.mark.asyncio
@pytest.mark.usefixtures('vcx_init_test_mode')
async def test_get_state():
connection = await Connection.create(source_id)
assert await connection.get_state() == State.Initialized
|
{
"content_hash": "cb1c9224036dae98cd8c2d857a3df38e",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 643,
"avg_line_length": 38.10285714285714,
"alnum_prop": 0.7422015596880623,
"repo_name": "anastasia-tarasova/indy-sdk",
"id": "459a0e30626ec60a988576ca49662e4cd1cb8230",
"size": "6668",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "vcx/wrappers/python3/tests/test_connection.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "207870"
},
{
"name": "C#",
"bytes": "842011"
},
{
"name": "C++",
"bytes": "229233"
},
{
"name": "CSS",
"bytes": "137079"
},
{
"name": "Dockerfile",
"bytes": "23945"
},
{
"name": "Groovy",
"bytes": "102863"
},
{
"name": "HTML",
"bytes": "897750"
},
{
"name": "Java",
"bytes": "882162"
},
{
"name": "JavaScript",
"bytes": "185247"
},
{
"name": "Makefile",
"bytes": "328"
},
{
"name": "Objective-C",
"bytes": "584121"
},
{
"name": "Objective-C++",
"bytes": "706749"
},
{
"name": "Perl",
"bytes": "8271"
},
{
"name": "Python",
"bytes": "750776"
},
{
"name": "Ruby",
"bytes": "80525"
},
{
"name": "Rust",
"bytes": "5872898"
},
{
"name": "Shell",
"bytes": "251160"
},
{
"name": "Swift",
"bytes": "1114"
},
{
"name": "TypeScript",
"bytes": "197439"
}
],
"symlink_target": ""
}
|
class Reducer(object):
def __init__(self):
pass
def word_counter(self, word_list, origin_list):
for word, count in origin_list.items():
if word in word_list:
word_list[word] += count
else:
word_list[word] = count
def do_reduce(self, dataA, dataB):
data = {}
data["done"] = list(set(dataA["done"] + dataB["done"]))
data["to"] = [
i for i in set(dataA["to"] + dataB["to"]) if not(i in data["done"])
]
word_list = {}
self.word_counter(word_list, dataA["words"])
self.word_counter(word_list, dataB["words"])
data["words"] = word_list
return data
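# --- Illustrative usage, not part of the original module ---
# do_reduce merges two partial results: "done" items are unioned, "to" keeps
# only items not already done, and per-word counts are summed. A minimal
# made-up example:
if __name__ == '__main__':
    reducer = Reducer()
    part_a = {'done': ['a'], 'to': ['b'], 'words': {'hello': 1}}
    part_b = {'done': ['b'], 'to': ['c'], 'words': {'hello': 2, 'world': 1}}
    merged = reducer.do_reduce(part_a, part_b)
    # merged['done'] contains 'a' and 'b', merged['to'] == ['c'],
    # merged['words'] == {'hello': 3, 'world': 1}
    print(merged)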
|
{
"content_hash": "b3b94581d92d29486054d5fe91026a3f",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 79,
"avg_line_length": 35.5,
"alnum_prop": 0.5084507042253521,
"repo_name": "SkyZH/ddcm-word-count",
"id": "054722bf0ed64857bae08e96325e7e2eef6a662a",
"size": "710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wordcount/reducer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "822026"
},
{
"name": "Python",
"bytes": "12891"
},
{
"name": "Shell",
"bytes": "78"
}
],
"symlink_target": ""
}
|
"""Grow theme management."""
import io
import logging
import zipfile
import requests
THEME_ARCHIVE_URL = 'https://github.com/growthemes/{}/archive/master.zip'
class GrowTheme(object):
"""Grow theme."""
def __init__(self, theme_name):
self.theme_name = theme_name
self.archive_url = THEME_ARCHIVE_URL.format(self.theme_name)
@property
def archive(self):
"""Download the archive zip and open."""
logging.info('Downloading `{}` from Github'.format(self.theme_name))
request = requests.get(self.archive_url)
return zipfile.ZipFile(io.BytesIO(request.content), 'r')
def extract(self, pod, force=False):
"""Extract the source archive into the destination pod."""
with self.archive as archive:
logging.info('Extracting theme into {}'.format(pod.root))
# Automatically enable "force" for empty directories.
if pod.list_dir('/') == []:
force = True
archive_prefix_dir = '{}-master'.format(self.theme_name)
archive_files = [name[len(archive_prefix_dir):] for name in archive.namelist()]
# Validate that it won't overwrite any files.
if not force:
for file_name in archive_files:
if file_name == '/':
continue
if pod.file_exists(file_name):
text = ('{}{} already exists. Delete the directory contents before'
' proceeding or use --force.')
logging.warn(text.format(pod.root, file_name))
return
for file_name in archive_files:
if file_name.endswith('/'):
continue
pod.write_file(
file_name, archive.read('{}{}'.format(archive_prefix_dir, file_name)))
logging.info('Pod ready to go: {}'.format(pod.root))
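# --- Illustrative note, not part of the original module ---
# The GitHub master.zip nests every member under a single "<theme>-master/"
# directory, which extract() strips before writing into the pod, e.g.
# (hypothetical file name):
#   archive member "my-theme-master/views/base.html" -> pod file "/views/base.html"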
|
{
"content_hash": "c4f9de3540508e5cb3467236dbc05baf",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 91,
"avg_line_length": 35.8,
"alnum_prop": 0.553580497714576,
"repo_name": "grow/pygrow",
"id": "c7db4abc962fc9c5430e1fea78b3695e14e0414e",
"size": "1969",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grow/sdk/themes.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "527"
},
{
"name": "HTML",
"bytes": "8714"
},
{
"name": "Python",
"bytes": "309004"
},
{
"name": "Shell",
"bytes": "4219"
}
],
"symlink_target": ""
}
|
import math
from pandac.PandaModules import Point3
from toontown.toonbase import ToontownGlobals
InputTimeout = 15
TireMovieTimeout = 120
MinWall = (-20.0, -15.0)
MaxWall = (20.0, 15.0)
TireRadius = 1.5
WallMargin = 1 + TireRadius
StartingPositions = (Point3(MinWall[0] + WallMargin, MinWall[1] + WallMargin, TireRadius),
Point3(MaxWall[0] - WallMargin, MaxWall[1] - WallMargin, TireRadius),
Point3(MinWall[0] + WallMargin, MaxWall[1] - WallMargin, TireRadius),
Point3(MaxWall[0] - WallMargin, MinWall[1] + WallMargin, TireRadius))
NumMatches = 3
NumRounds = 2
PointsDeadCenter = {0: 5,
1: 5,
2: 5,
3: 4,
4: 3}
PointsInCorner = 1
FarthestLength = math.sqrt((MaxWall[0] - TireRadius) * (MaxWall[0] - TireRadius) + (MaxWall[1] - TireRadius) * (MaxWall[1] - TireRadius))
BonusPointsForPlace = (3,
2,
1,
0)
ExpandFeetPerSec = 5
ScoreCountUpRate = 0.15
ShowScoresDuration = 4.0
NumTreasures = {ToontownGlobals.ToontownCentral: 2,
ToontownGlobals.DonaldsDock: 2,
ToontownGlobals.DaisyGardens: 2,
ToontownGlobals.MinniesMelodyland: 2,
ToontownGlobals.TheBrrrgh: 1,
ToontownGlobals.DonaldsDreamland: 1}
NumPenalties = {ToontownGlobals.ToontownCentral: 0,
ToontownGlobals.DonaldsDock: 1,
ToontownGlobals.DaisyGardens: 1,
ToontownGlobals.MinniesMelodyland: 1,
ToontownGlobals.TheBrrrgh: 2,
ToontownGlobals.DonaldsDreamland: 2}
Obstacles = {ToontownGlobals.ToontownCentral: (),
ToontownGlobals.DonaldsDock: ((0, 0),),
ToontownGlobals.DaisyGardens: ((MinWall[0] / 2, 0), (MaxWall[0] / 2, 0)),
ToontownGlobals.MinniesMelodyland: ((0, MinWall[1] / 2), (0, MaxWall[1] / 2)),
ToontownGlobals.TheBrrrgh: ((MinWall[0] / 2, 0),
(MaxWall[0] / 2, 0),
(0, MinWall[1] / 2),
(0, MaxWall[1] / 2)),
ToontownGlobals.DonaldsDreamland: ((MinWall[0] / 2, MinWall[1] / 2),
(MinWall[0] / 2, MaxWall[1] / 2),
(MaxWall[0] / 2, MinWall[1] / 2),
(MaxWall[0] / 2, MaxWall[1] / 2))}
ObstacleShapes = {ToontownGlobals.ToontownCentral: True,
ToontownGlobals.DonaldsDock: True,
ToontownGlobals.DaisyGardens: True,
ToontownGlobals.MinniesMelodyland: True,
ToontownGlobals.TheBrrrgh: False,
ToontownGlobals.DonaldsDreamland: False}
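# Worked example for the constants above: FarthestLength is
# sqrt((20 - 1.5)**2 + (15 - 1.5)**2) = sqrt(342.25 + 182.25) ~= 22.9, the
# distance from the rink centre to the farthest reachable tire position,
# which presumably bounds the distance-based interpolation between
# PointsDeadCenter and PointsInCorner.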
|
{
"content_hash": "1e29e55862aecc74931699551c484a27",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 137,
"avg_line_length": 39.220338983050844,
"alnum_prop": 0.672428694900605,
"repo_name": "ToonTownInfiniteRepo/ToontownInfinite",
"id": "572d099a9e5cae6ea9b534662ba67b05a1ccdc21",
"size": "2314",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "toontown/minigame/IceGameGlobals.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1703277"
},
{
"name": "C#",
"bytes": "9892"
},
{
"name": "C++",
"bytes": "5468044"
},
{
"name": "Emacs Lisp",
"bytes": "210083"
},
{
"name": "F#",
"bytes": "4611"
},
{
"name": "JavaScript",
"bytes": "7003"
},
{
"name": "Objective-C",
"bytes": "23212"
},
{
"name": "Puppet",
"bytes": "5245"
},
{
"name": "Python",
"bytes": "34010215"
},
{
"name": "Shell",
"bytes": "11192"
},
{
"name": "Tcl",
"bytes": "1981257"
}
],
"symlink_target": ""
}
|