text
stringlengths 4
1.02M
| meta
dict |
|---|---|
from direct.directnotify import DirectNotifyGlobal
from toontown.parties.DistributedPartyActivityAI import DistributedPartyActivityAI
from toontown.parties.DistributedPartyCatchActivityBase import DistributedPartyCatchActivityBase
from direct.task import Task
from direct.distributed.ClockDelta import globalClockDelta
from toontown.toonbase import TTLocalizer
import PartyGlobals
class DistributedPartyCatchActivityAI(DistributedPartyActivityAI, DistributedPartyCatchActivityBase):
notify = DirectNotifyGlobal.directNotify.newCategory("DistributedPartyCatchActivityAI")
def __init__(self, air, parent, activityTuple):
DistributedPartyActivityAI.__init__(self, air, parent, activityTuple)
self.numGenerations = 1
self.generations = []
self.player2catches = {}
self.startTimestamp = globalClockDelta.getRealNetworkTime(bits=32)
self.playing = False
def delete(self):
taskMgr.remove('newGeneration%d' % self.doId)
DistributedPartyActivityAI.delete(self)
def getStartTimestamp(self):
return self.startTimestamp
def setStartTimestamp(self, ts):
self.startTimestamp = ts
def setGenerations(self, generations):
self.generations = generations
def toonJoinRequest(self):
DistributedPartyActivityAI.toonJoinRequest(self)
avId = self.air.getAvatarIdFromSender()
self.player2catches[avId] = 0
if not self.playing:
self.__startGame()
self.sendUpdate('setState', ['Active', globalClockDelta.getRealNetworkTime()])
def toonExitDemand(self):
avId = self.air.getAvatarIdFromSender()
if not avId in self.toonsPlaying:
self.air.writeServerEvent('suspicious',avId,'Toon tried to exit a party game they\'re not using!')
return
catches = self.player2catches[avId]
del self.player2catches[avId]
av = self.air.doId2do.get(avId, None)
if not av:
self.air.writeServerEvent('suspicious',avId,'Toon tried to award beans while not in district!')
return
if catches > PartyGlobals.CatchMaxTotalReward:
catches = PartyGlobals.CatchMaxTotalReward
self.sendUpdateToAvatarId(avId, 'showJellybeanReward', [catches, av.getMoney(), TTLocalizer.PartyCatchRewardMessage % (catches, catches)])
av.addMoney(catches)
DistributedPartyActivityAI.toonExitDemand(self)
def __startGame(self):
self.playing = True
self.calcDifficultyConstants(len(self.toonsPlaying))
self.generations.append([self.numGenerations, globalClockDelta.getRealNetworkTime(bits=32), len(self.toonsPlaying)])
self.numGenerations += 1
self.sendUpdate('setGenerations', [self.generations])
taskMgr.doMethodLater(self.generationDuration, self.__newGeneration, 'newGeneration%d' % self.doId, extraArgs=[])
def __newGeneration(self):
if len(self.toonsPlaying) > 0:
self.__startGame()
else:
self.playing = False
def getGenerations(self):
return self.generations
def requestActivityStart(self):
pass
def startRequestResponse(self, todo0):
pass
def claimCatch(self, generation, objNum, objType):
avId = self.air.getAvatarIdFromSender()
if not avId in self.toonsPlaying:
self.air.writeServerEvent('suspicious',avId,'Toon tried to catch while not playing!')
return
if PartyGlobals.DOTypeId2Name[objType] != 'anvil':
self.player2catches[avId] += 1
self.sendUpdate('setObjectCaught', [avId, generation, objNum])
|
{
"content_hash": "ddbb1aeafbab4d2252596869d2a82ef8",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 146,
"avg_line_length": 42.7816091954023,
"alnum_prop": 0.6880709296077377,
"repo_name": "Spiderlover/Toontown",
"id": "f9fb4135a375ca9d61103861a181d5e7f51d8786",
"size": "3722",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "toontown/parties/DistributedPartyCatchActivityAI.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "7774"
},
{
"name": "Python",
"bytes": "17241353"
},
{
"name": "Shell",
"bytes": "7699"
}
],
"symlink_target": ""
}
|
"""COUNTING AND GROUPING"""
def item_order(order):
"""Return orders and quantities
Example:
>>> item_order("salad salad hamburger")
'salad:2 hamburger:1 water:0'
"""
return "salad:{} hamburger:{} water:{}".format(
order.count("salad"),
order.count("hamburger"),
order.count("water"))
if __name__ == '__main__':
import doctest
doctest.testmod()
|
{
"content_hash": "b007d9a2a30f2465397f67d4bd001dd9",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 51,
"avg_line_length": 21.736842105263158,
"alnum_prop": 0.5690072639225182,
"repo_name": "mottosso/mitx-6.00.1x",
"id": "e79338cf2ce9b1c6eb6a15962076be61f6a07ea6",
"size": "413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pset1/problem3.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3726"
}
],
"symlink_target": ""
}
|
from datetime import timedelta
from rest_framework import status as http_status
from django.utils import timezone
from nose.tools import * # noqa (PEP8 asserts)
from framework.auth import campaigns, views as auth_views, cas
from website.util import web_url_for
from website.util.metrics import provider_source_tag
from osf_tests import factories
from tests.base import OsfTestCase
from tests.utils import mock_auth
def set_preprint_providers():
"""Populate `PreprintProvider` to test database for testing."""
providers = {
'osf': 'Open Science Framework',
'socarxiv': 'SocArXiv',
'engrxiv': 'EngrXiv',
'psyarxiv': 'PsyArXiv',
}
for key, value in providers.items():
provider = factories.PreprintProviderFactory()
provider._id = key
provider.name = value
provider.save()
# tests for campaign initialization and update
class TestCampaignInitialization(OsfTestCase):
def setUp(self):
super(TestCampaignInitialization, self).setUp()
set_preprint_providers()
self.campaign_lists = [
'erpc',
'institution',
'osf-preprints',
'socarxiv-preprints',
'engrxiv-preprints',
'psyarxiv-preprints',
'osf-registries',
'osf-registered-reports',
]
self.refresh = timezone.now()
campaigns.CAMPAIGNS = None # force campaign refresh now that preprint providers are populated
campaigns.CAMPAIGNS_LAST_REFRESHED = self.refresh
def test_get_campaigns_init(self):
campaign_dict = campaigns.get_campaigns()
assert_equal(len(campaign_dict), len(self.campaign_lists))
for campaign in campaign_dict:
assert_in(campaign, self.campaign_lists)
assert_not_equal(self.refresh, campaigns.CAMPAIGNS_LAST_REFRESHED)
def test_get_campaigns_update_not_expired(self):
campaigns.get_campaigns()
self.refresh = campaigns.CAMPAIGNS_LAST_REFRESHED
campaigns.get_campaigns()
assert_equal(self.refresh, campaigns.CAMPAIGNS_LAST_REFRESHED)
def test_get_campaigns_update_expired(self):
campaigns.get_campaigns()
self.refresh = timezone.now() - timedelta(minutes=5)
campaigns.CAMPAIGNS_LAST_REFRESHED = self.refresh
campaigns.get_campaigns()
assert_not_equal(self.refresh, campaigns.CAMPAIGNS_LAST_REFRESHED)
# tests for campaign helper methods
class TestCampaignMethods(OsfTestCase):
def setUp(self):
super(TestCampaignMethods, self).setUp()
set_preprint_providers()
self.campaign_lists = [
'erpc',
'institution',
'osf-preprints',
'socarxiv-preprints',
'engrxiv-preprints',
'psyarxiv-preprints',
]
self.invalid_campaign = 'invalid_campaign'
campaigns.CAMPAIGNS = None # force campaign refresh now that preprint providers are populated
def test_is_institution_login(self):
for campaign in self.campaign_lists:
institution = campaigns.is_institution_login(campaign)
if campaign == 'institution':
assert_true(institution)
else:
assert_false(institution)
institution = campaigns.is_institution_login(self.invalid_campaign)
assert_true(institution is None)
def test_is_native_login(self):
for campaign in self.campaign_lists:
native = campaigns.is_native_login(campaign)
if campaign == 'erpc':
assert_true(native)
else:
assert_false(native)
native = campaigns.is_proxy_login(self.invalid_campaign)
assert_true(native is None)
def test_is_proxy_login(self):
for campaign in self.campaign_lists:
proxy = campaigns.is_proxy_login(campaign)
if campaign.endswith('-preprints'):
assert_true(proxy)
else:
assert_false(proxy)
proxy = campaigns.is_proxy_login(self.invalid_campaign)
assert_true(proxy is None)
def test_system_tag_for_campaign(self):
for campaign in self.campaign_lists:
tag = campaigns.system_tag_for_campaign(campaign)
assert_true(tag is not None)
tag = campaigns.system_tag_for_campaign(self.invalid_campaign)
assert_true(tag is None)
def test_email_template_for_campaign(self):
for campaign in self.campaign_lists:
template = campaigns.email_template_for_campaign(campaign)
if campaigns.is_institution_login(campaign):
assert_true(template is None)
else:
assert_true(template is not None)
template = campaigns.email_template_for_campaign(self.invalid_campaign)
assert_true(template is None)
def test_campaign_url_for(self):
for campaign in self.campaign_lists:
url = campaigns.campaign_url_for(campaign)
assert_true(url is not None)
url = campaigns.campaign_url_for(self.invalid_campaign)
assert_true(url is None)
def test_get_service_provider(self):
for campaign in self.campaign_lists:
provider = campaigns.get_service_provider(campaign)
if campaigns.is_proxy_login(campaign):
assert_true(provider is not None)
else:
assert_true(provider is None)
provider = campaigns.get_service_provider(self.invalid_campaign)
assert_true(provider is None)
def test_campaign_for_user(self):
user = factories.UserFactory()
user.add_system_tag(provider_source_tag('osf', 'preprint'))
user.save()
campaign = campaigns.campaign_for_user(user)
assert_equal(campaign, 'osf-preprints')
# tests for prereg, erpc, which follow similar auth login/register logic
class TestCampaignsAuthViews(OsfTestCase):
def setUp(self):
super(TestCampaignsAuthViews, self).setUp()
self.campaigns = {
'erpc': {
'title_register': 'Election Research Preacceptance Competition',
'title_landing': 'The Election Research Preacceptance Competition is Now Closed'
},
}
for key, value in self.campaigns.items():
value.update({'url_login': web_url_for('auth_login', campaign=key)})
value.update({'url_register': web_url_for('auth_register', campaign=key)})
value.update({'url_landing': campaigns.campaign_url_for(key)})
self.user = factories.AuthUserFactory()
def test_campaign_register_view_logged_in(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_register'], auth=self.user.auth)
assert_equal(resp.status_code, http_status.HTTP_302_FOUND)
assert_equal(value['url_landing'], resp.headers['Location'])
def test_campaign_register_view_logged_out(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_register'])
assert_equal(resp.status_code, http_status.HTTP_200_OK)
assert_in(value['title_register'], resp)
def test_campaign_login_logged_in(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_login'], auth=self.user.auth)
assert_equal(resp.status_code, http_status.HTTP_302_FOUND)
assert_in(value['url_landing'], resp.headers['Location'])
def test_campaign_login_logged_out(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_login'])
assert_equal(resp.status_code, http_status.HTTP_302_FOUND)
assert_in(value['url_register'], resp.headers['Location'])
def test_campaign_landing_logged_in(self):
for key, value in self.campaigns.items():
resp = self.app.get(value['url_landing'], auth=self.user.auth)
assert_equal(resp.status_code, http_status.HTTP_200_OK)
# tests for registration through campaigns
class TestRegistrationThroughCampaigns(OsfTestCase):
def setUp(self):
super(TestRegistrationThroughCampaigns, self).setUp()
campaigns.get_campaigns() # Set up global CAMPAIGNS
def test_confirm_email_get_with_campaign(self):
for key, value in campaigns.CAMPAIGNS.items():
user = factories.UnconfirmedUserFactory()
user.add_system_tag(value.get('system_tag'))
user.save()
token = user.get_confirmation_token(user.username)
kwargs = {
'uid': user._id,
}
with self.app.app.test_request_context(), mock_auth(user):
res = auth_views.confirm_email_get(token, **kwargs)
assert_equal(res.status_code, http_status.HTTP_302_FOUND)
assert_equal(res.location, campaigns.campaign_url_for(key))
# tests for institution
class TestCampaignsCASInstitutionLogin(OsfTestCase):
def setUp(self):
super(TestCampaignsCASInstitutionLogin, self).setUp()
self.url_login = web_url_for('auth_login', campaign='institution')
self.url_register = web_url_for('auth_register', campaign='institution')
self.service_url = web_url_for('dashboard', _absolute=True)
# go to CAS institution login page if not logged in
def test_institution_not_logged_in(self):
resp = self.app.get(self.url_login)
assert_equal(resp.status_code, http_status.HTTP_302_FOUND)
assert_in(cas.get_login_url(self.service_url, campaign='institution'), resp.headers['Location'])
# register behave the same as login
resp2 = self.app.get(self.url_register)
assert_equal(resp.headers['Location'], resp2.headers['Location'])
# go to target page (service url_ if logged in
def test_institution_logged_in(self):
resp = self.app.get(self.url_login)
assert_equal(resp.status_code, http_status.HTTP_302_FOUND)
assert_in(self.service_url, resp.headers['Location'])
# register behave the same as login
resp2 = self.app.get(self.url_register)
assert_equal(resp.headers['Location'], resp2.headers['Location'])
|
{
"content_hash": "8b3d9bb5294c004ac7c20cea61b0da05",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 104,
"avg_line_length": 40.26953125,
"alnum_prop": 0.6401202832476477,
"repo_name": "aaxelb/osf.io",
"id": "442d1d1f9314853daecf10446498e836c68ca284",
"size": "10309",
"binary": false,
"copies": "5",
"ref": "refs/heads/feature/keen-replacement",
"path": "tests/test_campaigns.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "93635"
},
{
"name": "Dockerfile",
"bytes": "5876"
},
{
"name": "HTML",
"bytes": "373758"
},
{
"name": "JavaScript",
"bytes": "1596130"
},
{
"name": "Jupyter Notebook",
"bytes": "41326"
},
{
"name": "Mako",
"bytes": "679193"
},
{
"name": "Python",
"bytes": "12036193"
},
{
"name": "Shell",
"bytes": "2841"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
}
|
from django.db.models import Q
from django.apps import apps
from taiga.front.templatetags.functions import resolve
from .base import Sitemap
class WikiPagesSitemap(Sitemap):
def items(self):
wiki_page_model = apps.get_model("wiki", "WikiPage")
# Get wiki pages of public projects OR private projects if anon user can view them
queryset = wiki_page_model.objects.filter(Q(project__is_private=False) |
Q(project__is_private=True,
project__anon_permissions__contains=["view_wiki_pages"]))
# Exclude blocked projects
queryset = queryset.filter(project__blocked_code__isnull=True)
# Exclude wiki pages from projects without wiki section enabled
queryset = queryset.exclude(project__is_wiki_activated=False)
# Project data is needed
queryset = queryset.select_related("project")
return queryset
def location(self, obj):
return resolve("wiki", obj.project.slug, obj.slug)
def lastmod(self, obj):
return obj.modified_date
def changefreq(self, obj):
return "daily"
def priority(self, obj):
return 0.6
|
{
"content_hash": "4e28198dfca653a7e0ffcabeaa084f54",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 109,
"avg_line_length": 31.76923076923077,
"alnum_prop": 0.6263115415657788,
"repo_name": "curiosityio/taiga-docker",
"id": "85e03ba041fe712eeb2700ea16ce8afde3eabd68",
"size": "2098",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "taiga-back/taiga-back/taiga/front/sitemaps/wiki.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "186988"
},
{
"name": "JavaScript",
"bytes": "2007"
},
{
"name": "Nginx",
"bytes": "4140"
},
{
"name": "Python",
"bytes": "2793020"
},
{
"name": "Shell",
"bytes": "1392"
}
],
"symlink_target": ""
}
|
import os
import sys
import csv
path = '/Users/nealcaidin/Documents/tmp/Tools'
# create a dictionary to hold the sum of all percentages of tool use across file
# (both by sites and by participants using a Python tuple construct)
aggregate_tools_percentages = {}
for file in os.listdir(path):
current = os.path.join(path, file)
if os.path.isfile(current) and ("csv" in current):
csv_file = open(current,'rU')
reader = csv.reader(csv_file)
# reset the total events. We will use this to calculate percentages
i = 0
# initialize total number of tool placements that are counted for the current file, both based on
# number of sites and based on the number of participants in the sites.
total_by_sites_in_file = 0
total_by_participants_in_file = 0
#reset dictionary which will contain the data for each tool
tools_per_file = {}
# catching exceptions helps identify data issues
# which need cleaning up
try:
for row in reader:
i += 1
# skip first row - header
if i > 1 :
# clean up any excess white space in the tool_name
tool_name = row[0].strip()
tool_num_sites = float(row[1])
tool_num_participants = float(row[2])
# add to totals so we can figure out proportion (percentages) later
total_by_sites_in_file += tool_num_sites
total_by_participants_in_file += tool_num_participants
# save data for each tool from the file
tools_per_file[tool_name] = (tool_num_sites, tool_num_participants)
except csv.Error:
print current
print('csv choked on line %s' % (i+1))
raise
except ValueError:
print current
print('csv choked on line %s' % (i+1))
raise
except IndexError:
print current
print('csv choked on line %s' % (i+1))
raise
# now we can calculate the relative proportion (percentage) of tool use both
# with respect to sites and with respect to number of participants, for each tool
for one_tool in tools_per_file:
# calculate the proportions (percentages)
tools_site_percentage = tools_per_file[one_tool][0] / total_by_sites_in_file
tools_participants_percentage = tools_per_file[one_tool][1] / total_by_participants_in_file
if aggregate_tools_percentages.has_key(one_tool):
aggregate_tools_percentages[one_tool][0] += tools_site_percentage
aggregate_tools_percentages[one_tool][1] += tools_participants_percentage
else:
aggregate_tools_percentages[one_tool] = [tools_site_percentage, tools_participants_percentage]
for a_tool in aggregate_tools_percentages:
# zero index is by sites, and one index is by participants
print a_tool, "," , aggregate_tools_percentages[a_tool][0], ",", aggregate_tools_percentages[a_tool][1]
|
{
"content_hash": "7f6ce965977755ce7b6886ca41734dc4",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 110,
"avg_line_length": 37.36470588235294,
"alnum_prop": 0.6001259445843828,
"repo_name": "ncaidin/sakai_sql_survey_results",
"id": "df4fdaf54787561477c3034d0419937446c279ac",
"size": "3176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "process_tools_files.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7204"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
class TestTorrentMatch(object):
config = """
tasks:
test_multi_torrent_empty_name:
mock:
- {title: 'torrent1', file: 'torrent_match_test_torrents/torrent1_empty_name.torrent'}
accept_all: yes
torrent_match:
what:
- mock:
- {title: 'torrent1', location: 'torrent_match_test_dir/torrent1'}
test_single_torrent:
mock:
- {title: 'torrent1', file: 'torrent_match_test_torrents/torrent1.mkv.torrent'}
accept_all: yes
torrent_match:
what:
- mock:
- {title: 'torrent1.mkv', location: 'torrent_match_test_dir/torrent1.mkv'}
test_single_torrent_in_other_dir:
mock:
- {title: 'torrent1', file: 'torrent_match_test_torrents/torrent1.mkv.torrent'}
accept_all: yes
torrent_match:
what:
- mock:
- {title: 'torrent1.mkv', location: 'torrent_match_test_dir/torrent1/torrent1.mkv'}
test_single_torrent_wrong_size:
mock:
- {title: 'torrent1', file: 'torrent_match_test_torrents/torrent1.mkv.torrent'}
accept_all: yes
torrent_match:
what:
- mock:
- {title: 'torrent1.mkv', location: 'torrent_match_test_dir/torrent1_wrong_size/torrent1.mkv'}
test_multi_torrent_with_diff_not_allowed:
mock:
- {title: 'multi_file_with_diff', file: 'torrent_match_test_torrents/multi_file_with_diff.torrent'}
accept_all: yes
torrent_match:
what:
- mock:
- {title: 'multi_file_with_diff', location: 'torrent_match_test_dir'}
test_multi_torrent_with_diff_allowed:
mock:
- {title: 'multi_file_with_diff', file: 'torrent_match_test_torrents/multi_file_with_diff.torrent'}
accept_all: yes
torrent_match:
what:
- mock:
- {title: 'multi_file_with_diff', location: 'torrent_match_test_dir'}
max_size_difference: 5%
test_multi_torrent_is_root_dir:
mock:
- {title: 'multi_file_with_diff', file: 'torrent_match_test_torrents/multi_file_with_diff.torrent'}
accept_all: yes
torrent_match:
what:
- mock:
- {title: 'multi_file_with_diff', location: 'torrent_match_test_dir/multi_file_with_diff'}
max_size_difference: 5%
test_with_filesystem:
filesystem: 'torrent_match_test_torrents/'
accept_all: yes
torrent_match:
what:
- filesystem: 'torrent_match_test_dir/'
max_size_difference: 5%
"""
def test_multi_torrent_empty_name(self, execute_task):
task = execute_task('test_multi_torrent_empty_name')
assert len(task.accepted) == 1, 'Should have accepted torrent1.mkv'
assert task.accepted[0]['path'] == 'torrent_match_test_dir/torrent1'
def test_single_torrent(self, execute_task):
task = execute_task('test_single_torrent')
assert len(task.accepted) == 1, 'Should have accepted torrent1.mkv'
assert task.accepted[0]['path'] == 'torrent_match_test_dir'
def test_single_torrent_in_other_dir(self, execute_task):
task = execute_task('test_single_torrent_in_other_dir')
assert len(task.accepted) == 1, 'Should have accepted torrent1.mkv'
assert task.accepted[0]['path'] == 'torrent_match_test_dir/torrent1'
def test_single_torrent_wrong_size(self, execute_task):
task = execute_task('test_single_torrent_wrong_size')
assert len(task.rejected) == 1, 'Should have rejected torrent1.mkv because its size does not match'
def test_multi_torrent_with_diff_not_allowed(self, execute_task):
task = execute_task('test_multi_torrent_with_diff_not_allowed')
assert len(task.rejected) == 1, 'Should have rejected multi_file_with_diff because its size does not match'
def test_multi_torrent_with_diff_allowed(self, execute_task):
task = execute_task('test_multi_torrent_with_diff_allowed')
assert len(task.accepted) == 1, 'Should have accepted multi_file_with_diff because its size is within threshold'
assert task.accepted[0]['path'] == 'torrent_match_test_dir'
def test_multi_torrent_is_root_dir(self, execute_task):
task = execute_task('test_multi_torrent_is_root_dir')
assert len(task.accepted) == 1, 'Should have accepted multi_file_with_diff because its size is within threshold'
assert task.accepted[0]['path'] == 'torrent_match_test_dir'
def test_with_filesystem(self, execute_task):
task = execute_task('test_with_filesystem')
assert len(task.all_entries) == 4, 'There should be three torrent files, thus three entries'
assert len(task.accepted) == 4, 'Should have accepted multi_file_with_diff, torrent1.mkv, torrent2 and ' \
'torrent1 because their sizes are within the allowed threshold'
|
{
"content_hash": "fca0f610783456494fbe1831d3df7608",
"timestamp": "",
"source": "github",
"line_count": 121,
"max_line_length": 120,
"avg_line_length": 45.00826446280992,
"alnum_prop": 0.5883217040029379,
"repo_name": "jawilson/Flexget",
"id": "e20fe4a8b13fae461fddbedd0d99ae72461114e5",
"size": "5446",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "flexget/tests/test_torrent_match.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "11875"
},
{
"name": "Dockerfile",
"bytes": "1988"
},
{
"name": "HTML",
"bytes": "79800"
},
{
"name": "JavaScript",
"bytes": "263723"
},
{
"name": "Python",
"bytes": "3364620"
},
{
"name": "SRecode Template",
"bytes": "3"
},
{
"name": "Shell",
"bytes": "1576"
}
],
"symlink_target": ""
}
|
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
import sphinxcontrib-rdf
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'sphinxcontrib-rdf'
copyright = u'2013, Wes Turner'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = sphinxcontrib-rdf.__version__
# The full version, including alpha/beta/rc tags.
release = sphinxcontrib-rdf.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'sphinxcontrib-rdfdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'sphinxcontrib-rdf.tex', u'sphinxcontrib-rdf Documentation',
u'Wes Turner', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'sphinxcontrib-rdf', u'sphinxcontrib-rdf Documentation',
[u'Wes Turner'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'sphinxcontrib-rdf', u'sphinxcontrib-rdf Documentation',
u'Wes Turner', 'sphinxcontrib-rdf', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "07db96c87c2bff382a064b29796afca7",
"timestamp": "",
"source": "github",
"line_count": 246,
"max_line_length": 80,
"avg_line_length": 32.58130081300813,
"alnum_prop": 0.7075483468496568,
"repo_name": "westurner/sphinxcontrib-rdf",
"id": "eb2a8b45bee9dd64ffec57446077e93f1ba1ae80",
"size": "8458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1222"
},
{
"name": "Python",
"bytes": "2473"
}
],
"symlink_target": ""
}
|
import socket
import time
from enum import IntEnum
from typing import Tuple, TYPE_CHECKING
import threading
from PyQt5.QtCore import Qt, pyqtSignal, QThread
from PyQt5.QtWidgets import (QTreeWidget, QTreeWidgetItem, QMenu, QGridLayout, QComboBox,
QLineEdit, QDialog, QVBoxLayout, QHeaderView, QCheckBox,
QTabWidget, QWidget, QLabel)
from PyQt5.QtGui import QFontMetrics
from electrum.i18n import _
from electrum import constants, blockchain, util
from electrum.interface import ServerAddr, PREFERRED_NETWORK_PROTOCOL
from electrum.network import Network
from electrum.logging import get_logger
from .util import (Buttons, CloseButton, HelpButton, read_QIcon, char_width_in_lineedit,
PasswordLineEdit)
from .util import QtEventListener, qt_event_listener
if TYPE_CHECKING:
from electrum.simple_config import SimpleConfig
_logger = get_logger(__name__)
protocol_names = ['TCP', 'SSL']
protocol_letters = 'ts'
class NetworkDialog(QDialog, QtEventListener):
def __init__(self, *, network: Network, config: 'SimpleConfig'):
QDialog.__init__(self)
self.setWindowTitle(_('Network'))
self.setMinimumSize(500, 500)
self.nlayout = NetworkChoiceLayout(network, config)
vbox = QVBoxLayout(self)
vbox.addLayout(self.nlayout.layout())
vbox.addLayout(Buttons(CloseButton(self)))
self.register_callbacks()
self._cleaned_up = False
@qt_event_listener
def on_event_network_updated(self):
self.nlayout.update()
def clean_up(self):
if self._cleaned_up:
return
self._cleaned_up = True
self.nlayout.clean_up()
self.unregister_callbacks()
class NodesListWidget(QTreeWidget):
"""List of connected servers."""
SERVER_ADDR_ROLE = Qt.UserRole + 100
CHAIN_ID_ROLE = Qt.UserRole + 101
ITEMTYPE_ROLE = Qt.UserRole + 102
class ItemType(IntEnum):
CHAIN = 0
CONNECTED_SERVER = 1
DISCONNECTED_SERVER = 2
TOPLEVEL = 3
def __init__(self, parent):
QTreeWidget.__init__(self)
self.parent = parent # type: NetworkChoiceLayout
self.setHeaderLabels([_('Server'), _('Height')])
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.create_menu)
def create_menu(self, position):
item = self.currentItem()
if not item:
return
item_type = item.data(0, self.ITEMTYPE_ROLE)
menu = QMenu()
if item_type == self.ItemType.CONNECTED_SERVER:
server = item.data(0, self.SERVER_ADDR_ROLE) # type: ServerAddr
menu.addAction(_("Use as server"), lambda: self.parent.follow_server(server))
elif item_type == self.ItemType.DISCONNECTED_SERVER:
server = item.data(0, self.SERVER_ADDR_ROLE) # type: ServerAddr
def func():
self.parent.server_e.setText(server.net_addr_str())
self.parent.set_server()
menu.addAction(_("Use as server"), func)
elif item_type == self.ItemType.CHAIN:
chain_id = item.data(0, self.CHAIN_ID_ROLE)
menu.addAction(_("Follow this branch"), lambda: self.parent.follow_branch(chain_id))
else:
return
menu.exec_(self.viewport().mapToGlobal(position))
def keyPressEvent(self, event):
if event.key() in [Qt.Key_F2, Qt.Key_Return, Qt.Key_Enter]:
self.on_activated(self.currentItem(), self.currentColumn())
else:
QTreeWidget.keyPressEvent(self, event)
def on_activated(self, item, column):
# on 'enter' we show the menu
pt = self.visualItemRect(item).bottomLeft()
pt.setX(50)
self.customContextMenuRequested.emit(pt)
def update(self, *, network: Network, servers: dict, use_tor: bool):
self.clear()
# connected servers
connected_servers_item = QTreeWidgetItem([_("Connected nodes"), ''])
connected_servers_item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.TOPLEVEL)
chains = network.get_blockchains()
n_chains = len(chains)
for chain_id, interfaces in chains.items():
b = blockchain.blockchains.get(chain_id)
if b is None: continue
name = b.get_name()
if n_chains > 1:
x = QTreeWidgetItem([name + '@%d'%b.get_max_forkpoint(), '%d'%b.height()])
x.setData(0, self.ITEMTYPE_ROLE, self.ItemType.CHAIN)
x.setData(0, self.CHAIN_ID_ROLE, b.get_id())
else:
x = connected_servers_item
for i in interfaces:
star = ' *' if i == network.interface else ''
item = QTreeWidgetItem([f"{i.server.to_friendly_name()}" + star, '%d'%i.tip])
item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.CONNECTED_SERVER)
item.setData(0, self.SERVER_ADDR_ROLE, i.server)
item.setToolTip(0, str(i.server))
x.addChild(item)
if n_chains > 1:
connected_servers_item.addChild(x)
# disconnected servers
disconnected_servers_item = QTreeWidgetItem([_("Other known servers"), ""])
disconnected_servers_item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.TOPLEVEL)
connected_hosts = set([iface.host for ifaces in chains.values() for iface in ifaces])
protocol = PREFERRED_NETWORK_PROTOCOL
for _host, d in sorted(servers.items()):
if _host in connected_hosts:
continue
if _host.endswith('.onion') and not use_tor:
continue
port = d.get(protocol)
if port:
server = ServerAddr(_host, port, protocol=protocol)
item = QTreeWidgetItem([server.net_addr_str(), ""])
item.setData(0, self.ITEMTYPE_ROLE, self.ItemType.DISCONNECTED_SERVER)
item.setData(0, self.SERVER_ADDR_ROLE, server)
disconnected_servers_item.addChild(item)
self.addTopLevelItem(connected_servers_item)
self.addTopLevelItem(disconnected_servers_item)
connected_servers_item.setExpanded(True)
for i in range(connected_servers_item.childCount()):
connected_servers_item.child(i).setExpanded(True)
disconnected_servers_item.setExpanded(True)
# headers
h = self.header()
h.setStretchLastSection(False)
h.setSectionResizeMode(0, QHeaderView.Stretch)
h.setSectionResizeMode(1, QHeaderView.ResizeToContents)
super().update()
class NetworkChoiceLayout(object):
def __init__(self, network: Network, config: 'SimpleConfig', wizard=False):
self.network = network
self.config = config
self.tor_proxy = None
self.tabs = tabs = QTabWidget()
proxy_tab = QWidget()
blockchain_tab = QWidget()
tabs.addTab(blockchain_tab, _('Overview'))
tabs.addTab(proxy_tab, _('Proxy'))
fixed_width_hostname = 24 * char_width_in_lineedit()
fixed_width_port = 6 * char_width_in_lineedit()
# Proxy tab
grid = QGridLayout(proxy_tab)
grid.setSpacing(8)
# proxy setting
self.proxy_cb = QCheckBox(_('Use proxy'))
self.proxy_cb.clicked.connect(self.check_disable_proxy)
self.proxy_cb.clicked.connect(self.set_proxy)
self.proxy_mode = QComboBox()
self.proxy_mode.addItems(['SOCKS4', 'SOCKS5'])
self.proxy_host = QLineEdit()
self.proxy_host.setFixedWidth(fixed_width_hostname)
self.proxy_port = QLineEdit()
self.proxy_port.setFixedWidth(fixed_width_port)
self.proxy_user = QLineEdit()
self.proxy_user.setPlaceholderText(_("Proxy user"))
self.proxy_password = PasswordLineEdit()
self.proxy_password.setPlaceholderText(_("Password"))
self.proxy_password.setFixedWidth(fixed_width_port)
self.proxy_mode.currentIndexChanged.connect(self.set_proxy)
self.proxy_host.editingFinished.connect(self.set_proxy)
self.proxy_port.editingFinished.connect(self.set_proxy)
self.proxy_user.editingFinished.connect(self.set_proxy)
self.proxy_password.editingFinished.connect(self.set_proxy)
self.proxy_mode.currentIndexChanged.connect(self.proxy_settings_changed)
self.proxy_host.textEdited.connect(self.proxy_settings_changed)
self.proxy_port.textEdited.connect(self.proxy_settings_changed)
self.proxy_user.textEdited.connect(self.proxy_settings_changed)
self.proxy_password.textEdited.connect(self.proxy_settings_changed)
self.tor_cb = QCheckBox(_("Use Tor Proxy"))
self.tor_cb.setIcon(read_QIcon("tor_logo.png"))
self.tor_cb.hide()
self.tor_cb.clicked.connect(self.use_tor_proxy)
grid.addWidget(self.tor_cb, 1, 0, 1, 3)
grid.addWidget(self.proxy_cb, 2, 0, 1, 3)
grid.addWidget(HelpButton(_('Proxy settings apply to all connections: with Electrum servers, but also with third-party services.')), 2, 4)
grid.addWidget(self.proxy_mode, 4, 1)
grid.addWidget(self.proxy_host, 4, 2)
grid.addWidget(self.proxy_port, 4, 3)
grid.addWidget(self.proxy_user, 5, 2)
grid.addWidget(self.proxy_password, 5, 3)
grid.setRowStretch(7, 1)
# Blockchain Tab
grid = QGridLayout(blockchain_tab)
msg = ' '.join([
_("Electrum connects to several nodes in order to download block headers and find out the longest blockchain."),
_("This blockchain is used to verify the transactions sent by your transaction server.")
])
self.status_label = QLabel('')
grid.addWidget(QLabel(_('Status') + ':'), 0, 0)
grid.addWidget(self.status_label, 0, 1, 1, 3)
grid.addWidget(HelpButton(msg), 0, 4)
self.autoconnect_cb = QCheckBox(_('Select server automatically'))
self.autoconnect_cb.setEnabled(self.config.is_modifiable('auto_connect'))
self.autoconnect_cb.clicked.connect(self.set_server)
self.autoconnect_cb.clicked.connect(self.update)
msg = ' '.join([
_("If auto-connect is enabled, Electrum will always use a server that is on the longest blockchain."),
_("If it is disabled, you have to choose a server you want to use. Electrum will warn you if your server is lagging.")
])
grid.addWidget(self.autoconnect_cb, 1, 0, 1, 3)
grid.addWidget(HelpButton(msg), 1, 4)
self.server_e = QLineEdit()
self.server_e.setFixedWidth(fixed_width_hostname + fixed_width_port)
self.server_e.editingFinished.connect(self.set_server)
msg = _("Electrum sends your wallet addresses to a single server, in order to receive your transaction history.")
grid.addWidget(QLabel(_('Server') + ':'), 2, 0)
grid.addWidget(self.server_e, 2, 1, 1, 3)
grid.addWidget(HelpButton(msg), 2, 4)
self.height_label = QLabel('')
msg = _('This is the height of your local copy of the blockchain.')
grid.addWidget(QLabel(_('Blockchain') + ':'), 3, 0)
grid.addWidget(self.height_label, 3, 1)
grid.addWidget(HelpButton(msg), 3, 4)
self.split_label = QLabel('')
grid.addWidget(self.split_label, 4, 0, 1, 3)
self.nodes_list_widget = NodesListWidget(self)
grid.addWidget(self.nodes_list_widget, 6, 0, 1, 5)
vbox = QVBoxLayout()
vbox.addWidget(tabs)
self.layout_ = vbox
# tor detector
self.td = td = TorDetector()
td.found_proxy.connect(self.suggest_proxy)
td.start()
self.fill_in_proxy_settings()
self.update()
def clean_up(self):
if self.td:
self.td.found_proxy.disconnect()
self.td.stop()
self.td = None
def check_disable_proxy(self, b):
if not self.config.is_modifiable('proxy'):
b = False
for w in [self.proxy_mode, self.proxy_host, self.proxy_port, self.proxy_user, self.proxy_password]:
w.setEnabled(b)
def enable_set_server(self):
if self.config.is_modifiable('server'):
enabled = not self.autoconnect_cb.isChecked()
self.server_e.setEnabled(enabled)
else:
for w in [self.autoconnect_cb, self.server_e, self.nodes_list_widget]:
w.setEnabled(False)
def update(self):
net_params = self.network.get_parameters()
server = net_params.server
auto_connect = net_params.auto_connect
if not self.server_e.hasFocus():
self.server_e.setText(server.to_friendly_name())
self.autoconnect_cb.setChecked(auto_connect)
height_str = "%d "%(self.network.get_local_height()) + _('blocks')
self.height_label.setText(height_str)
n = len(self.network.get_interfaces())
status = _("Connected to {0} nodes.").format(n) if n > 1 else _("Connected to {0} node.").format(n) if n == 1 else _("Not connected")
self.status_label.setText(status)
chains = self.network.get_blockchains()
if len(chains) > 1:
chain = self.network.blockchain()
forkpoint = chain.get_max_forkpoint()
name = chain.get_name()
msg = _('Chain split detected at block {0}').format(forkpoint) + '\n'
msg += (_('You are following branch') if auto_connect else _('Your server is on branch'))+ ' ' + name
msg += ' (%d %s)' % (chain.get_branch_size(), _('blocks'))
else:
msg = ''
self.split_label.setText(msg)
self.nodes_list_widget.update(network=self.network,
servers=self.network.get_servers(),
use_tor=self.tor_cb.isChecked())
self.enable_set_server()
def fill_in_proxy_settings(self):
proxy_config = self.network.get_parameters().proxy
if not proxy_config:
proxy_config = {"mode": "none", "host": "localhost", "port": "9050"}
b = proxy_config.get('mode') != "none"
self.check_disable_proxy(b)
if b:
self.proxy_cb.setChecked(True)
self.proxy_mode.setCurrentIndex(
self.proxy_mode.findText(str(proxy_config.get("mode").upper())))
self.proxy_host.setText(proxy_config.get("host"))
self.proxy_port.setText(proxy_config.get("port"))
self.proxy_user.setText(proxy_config.get("user", ""))
self.proxy_password.setText(proxy_config.get("password", ""))
def layout(self):
return self.layout_
def follow_branch(self, chain_id):
self.network.run_from_another_thread(self.network.follow_chain_given_id(chain_id))
self.update()
def follow_server(self, server: ServerAddr):
self.network.run_from_another_thread(self.network.follow_chain_given_server(server))
self.update()
def accept(self):
pass
def set_server(self):
net_params = self.network.get_parameters()
try:
server = ServerAddr.from_str_with_inference(str(self.server_e.text()))
if not server: raise Exception("failed to parse")
except Exception:
return
net_params = net_params._replace(server=server,
auto_connect=self.autoconnect_cb.isChecked())
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def set_proxy(self):
net_params = self.network.get_parameters()
if self.proxy_cb.isChecked():
proxy = {'mode':str(self.proxy_mode.currentText()).lower(),
'host':str(self.proxy_host.text()),
'port':str(self.proxy_port.text()),
'user':str(self.proxy_user.text()),
'password':str(self.proxy_password.text())}
else:
proxy = None
self.tor_cb.setChecked(False)
net_params = net_params._replace(proxy=proxy)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def suggest_proxy(self, found_proxy):
if found_proxy is None:
self.tor_cb.hide()
return
self.tor_proxy = found_proxy
self.tor_cb.setText("Use Tor proxy at port " + str(found_proxy[1]))
if (self.proxy_cb.isChecked()
and self.proxy_mode.currentIndex() == self.proxy_mode.findText('SOCKS5')
and self.proxy_host.text() == "127.0.0.1"
and self.proxy_port.text() == str(found_proxy[1])):
self.tor_cb.setChecked(True)
self.tor_cb.show()
def use_tor_proxy(self, use_it):
if not use_it:
self.proxy_cb.setChecked(False)
else:
socks5_mode_index = self.proxy_mode.findText('SOCKS5')
if socks5_mode_index == -1:
_logger.info("can't find proxy_mode 'SOCKS5'")
return
self.proxy_mode.setCurrentIndex(socks5_mode_index)
self.proxy_host.setText("127.0.0.1")
self.proxy_port.setText(str(self.tor_proxy[1]))
self.proxy_user.setText("")
self.proxy_password.setText("")
self.tor_cb.setChecked(True)
self.proxy_cb.setChecked(True)
self.check_disable_proxy(use_it)
self.set_proxy()
def proxy_settings_changed(self):
self.tor_cb.setChecked(False)
class TorDetector(QThread):
found_proxy = pyqtSignal(object)
def __init__(self):
QThread.__init__(self)
self._stop_event = threading.Event()
def run(self):
# Probable ports for Tor to listen at
ports = [9050, 9150]
while True:
for p in ports:
net_addr = ("127.0.0.1", p)
if TorDetector.is_tor_port(net_addr):
self.found_proxy.emit(net_addr)
break
else:
self.found_proxy.emit(None)
stopping = self._stop_event.wait(10)
if stopping:
return
def stop(self):
self._stop_event.set()
self.exit()
self.wait()
@staticmethod
def is_tor_port(net_addr: Tuple[str, int]) -> bool:
try:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
s.settimeout(0.1)
s.connect(net_addr)
# Tor responds uniquely to HTTP-like requests
s.send(b"GET\n")
if b"Tor is not an HTTP Proxy" in s.recv(1024):
return True
except socket.error:
pass
return False
|
{
"content_hash": "a62a0c9c3d487181ee4cef0fcfd6741a",
"timestamp": "",
"source": "github",
"line_count": 469,
"max_line_length": 146,
"avg_line_length": 40.25799573560768,
"alnum_prop": 0.6020867538795615,
"repo_name": "spesmilo/electrum",
"id": "cb2d4297cd9ea8ca64b67de2532e8e2720a40095",
"size": "20046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum/gui/qt/network_dialog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "13136"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "2929"
},
{
"name": "Makefile",
"bytes": "2185"
},
{
"name": "NSIS",
"bytes": "7681"
},
{
"name": "Python",
"bytes": "5400804"
},
{
"name": "QML",
"bytes": "355804"
},
{
"name": "Ruby",
"bytes": "16748"
},
{
"name": "Shell",
"bytes": "105118"
},
{
"name": "kvlang",
"bytes": "67438"
}
],
"symlink_target": ""
}
|
"""Creates Homewizard Energy switch entities."""
from __future__ import annotations
from typing import Any
from homeassistant.components.switch import SwitchDeviceClass, SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
from .coordinator import HWEnergyDeviceUpdateCoordinator
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up switches."""
coordinator: HWEnergyDeviceUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
if coordinator.api.state:
async_add_entities(
[
HWEnergyMainSwitchEntity(coordinator, entry),
HWEnergySwitchLockEntity(coordinator, entry),
]
)
class HWEnergySwitchEntity(CoordinatorEntity, SwitchEntity):
"""Representation switchable entity."""
coordinator: HWEnergyDeviceUpdateCoordinator
def __init__(
self,
coordinator: HWEnergyDeviceUpdateCoordinator,
entry: ConfigEntry,
key: str,
) -> None:
"""Initialize the switch."""
super().__init__(coordinator)
self._attr_unique_id = f"{entry.unique_id}_{key}"
self._attr_device_info = {
"name": entry.title,
"manufacturer": "HomeWizard",
"sw_version": coordinator.data["device"].firmware_version,
"model": coordinator.data["device"].product_type,
"identifiers": {(DOMAIN, coordinator.data["device"].serial)},
}
class HWEnergyMainSwitchEntity(HWEnergySwitchEntity):
"""Representation of the main power switch."""
_attr_device_class = SwitchDeviceClass.OUTLET
def __init__(
self, coordinator: HWEnergyDeviceUpdateCoordinator, entry: ConfigEntry
) -> None:
"""Initialize the switch."""
super().__init__(coordinator, entry, "power_on")
# Config attributes
self._attr_name = f"{entry.title} Switch"
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the switch on."""
await self.coordinator.api.state.set(power_on=True)
await self.coordinator.async_refresh()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the switch off."""
await self.coordinator.api.state.set(power_on=False)
await self.coordinator.async_refresh()
@property
def available(self) -> bool:
"""
Return availability of power_on.
This switch becomes unavailable when switch_lock is enabled.
"""
return super().available and not self.coordinator.api.state.switch_lock
@property
def is_on(self) -> bool:
"""Return true if switch is on."""
return bool(self.coordinator.api.state.power_on)
class HWEnergySwitchLockEntity(HWEnergySwitchEntity):
"""
Representation of the switch-lock configuration.
Switch-lock is a feature that forces the relay in 'on' state.
It disables any method that can turn of the relay.
"""
_attr_device_class = SwitchDeviceClass.SWITCH
_attr_entity_category = EntityCategory.CONFIG
def __init__(
self, coordinator: HWEnergyDeviceUpdateCoordinator, entry: ConfigEntry
) -> None:
"""Initialize the switch."""
super().__init__(coordinator, entry, "switch_lock")
# Config attributes
self._attr_name = f"{entry.title} Switch Lock"
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn switch-lock on."""
await self.coordinator.api.state.set(switch_lock=True)
await self.coordinator.async_refresh()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn switch-lock off."""
await self.coordinator.api.state.set(switch_lock=False)
await self.coordinator.async_refresh()
@property
def is_on(self) -> bool:
"""Return true if switch is on."""
return bool(self.coordinator.api.state.switch_lock)
|
{
"content_hash": "0ca15bae7e43b338d3615bf63d1e31cb",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 84,
"avg_line_length": 32.96124031007752,
"alnum_prop": 0.660865475070555,
"repo_name": "GenericStudent/home-assistant",
"id": "7860370baa7a2dcace0eddbb4316a9f1a8238664",
"size": "4252",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/homewizard/switch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "3070"
},
{
"name": "Python",
"bytes": "44491729"
},
{
"name": "Shell",
"bytes": "5092"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from __future__ import absolute_import
from collections import namedtuple
import logging
import re
import os
import sys
from operator import attrgetter
import six
from docker.errors import APIError
from docker.utils import create_host_config, LogConfig
from docker.utils.ports import build_port_bindings, split_port
from . import __version__
from .config import DOCKER_CONFIG_KEYS, merge_environment
from .const import (
DEFAULT_TIMEOUT,
LABEL_CONTAINER_NUMBER,
LABEL_ONE_OFF,
LABEL_PROJECT,
LABEL_SERVICE,
LABEL_VERSION,
LABEL_CONFIG_HASH,
)
from .container import Container
from .legacy import check_for_legacy_containers
from .progress_stream import stream_output, StreamOutputError
from .utils import json_hash, parallel_execute
from .config.validation import VALID_NAME_CHARS
log = logging.getLogger(__name__)
DOCKER_START_KEYS = [
'cap_add',
'cap_drop',
'devices',
'dns',
'dns_search',
'env_file',
'extra_hosts',
'read_only',
'net',
'log_driver',
'log_opt',
'mem_limit',
'memswap_limit',
'pid',
'privileged',
'restart',
'volumes_from',
'security_opt',
]
class BuildError(Exception):
def __init__(self, service, reason):
self.service = service
self.reason = reason
class ConfigError(ValueError):
pass
class NeedsBuildError(Exception):
def __init__(self, service):
self.service = service
class NoSuchImageError(Exception):
pass
VolumeSpec = namedtuple('VolumeSpec', 'external internal mode')
ServiceName = namedtuple('ServiceName', 'project service number')
ConvergencePlan = namedtuple('ConvergencePlan', 'action containers')
class Service(object):
def __init__(self, name, client=None, project='default', links=None, external_links=None, volumes_from=None, net=None, **options):
if not re.match('^%s+$' % VALID_NAME_CHARS, project):
raise ConfigError('Invalid project name "%s" - only %s are allowed' % (project, VALID_NAME_CHARS))
self.name = name
self.client = client
self.project = project
self.links = links or []
self.external_links = external_links or []
self.volumes_from = volumes_from or []
self.net = net or None
self.options = options
def containers(self, stopped=False, one_off=False, filters={}):
filters.update({'label': self.labels(one_off=one_off)})
containers = filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=stopped,
filters=filters)])
if not containers:
check_for_legacy_containers(
self.client,
self.project,
[self.name],
)
return containers
def get_container(self, number=1):
"""Return a :class:`compose.container.Container` for this service. The
container must be active, and match `number`.
"""
labels = self.labels() + ['{0}={1}'.format(LABEL_CONTAINER_NUMBER, number)]
for container in self.client.containers(filters={'label': labels}):
return Container.from_ps(self.client, container)
raise ValueError("No container found for %s_%s" % (self.name, number))
def start(self, **options):
for c in self.containers(stopped=True):
self.start_container_if_stopped(c, **options)
# TODO: remove these functions, project takes care of starting/stopping,
def stop(self, **options):
for c in self.containers():
log.info("Stopping %s..." % c.name)
c.stop(**options)
def pause(self, **options):
for c in self.containers(filters={'status': 'running'}):
log.info("Pausing %s..." % c.name)
c.pause(**options)
def unpause(self, **options):
for c in self.containers(filters={'status': 'paused'}):
log.info("Unpausing %s..." % c.name)
c.unpause()
def kill(self, **options):
for c in self.containers():
log.info("Killing %s..." % c.name)
c.kill(**options)
def restart(self, **options):
for c in self.containers():
log.info("Restarting %s..." % c.name)
c.restart(**options)
# end TODO
def scale(self, desired_num, timeout=DEFAULT_TIMEOUT):
"""
Adjusts the number of containers to the specified number and ensures
they are running.
- creates containers until there are at least `desired_num`
- stops containers until there are at most `desired_num` running
- starts containers until there are at least `desired_num` running
- removes all stopped containers
"""
if self.custom_container_name() and desired_num > 1:
log.warn('The "%s" service is using the custom container name "%s". '
'Docker requires each container to have a unique name. '
'Remove the custom name to scale the service.'
% (self.name, self.custom_container_name()))
if self.specifies_host_port():
log.warn('The "%s" service specifies a port on the host. If multiple containers '
'for this service are created on a single host, the port will clash.'
% self.name)
def create_and_start(service, number):
container = service.create_container(number=number, quiet=True)
container.start()
return container
running_containers = self.containers(stopped=False)
num_running = len(running_containers)
if desired_num == num_running:
# do nothing as we already have the desired number
log.info('Desired container number already achieved')
return
if desired_num > num_running:
# we need to start/create until we have desired_num
all_containers = self.containers(stopped=True)
if num_running != len(all_containers):
# we have some stopped containers, let's start them up again
stopped_containers = sorted([c for c in all_containers if not c.is_running], key=attrgetter('number'))
num_stopped = len(stopped_containers)
if num_stopped + num_running > desired_num:
num_to_start = desired_num - num_running
containers_to_start = stopped_containers[:num_to_start]
else:
containers_to_start = stopped_containers
parallel_execute(
objects=containers_to_start,
obj_callable=lambda c: c.start(),
msg_index=lambda c: c.name,
msg="Starting"
)
num_running += len(containers_to_start)
num_to_create = desired_num - num_running
next_number = self._next_container_number()
container_numbers = [
number for number in range(
next_number, next_number + num_to_create
)
]
parallel_execute(
objects=container_numbers,
obj_callable=lambda n: create_and_start(service=self, number=n),
msg_index=lambda n: n,
msg="Creating and starting"
)
if desired_num < num_running:
num_to_stop = num_running - desired_num
sorted_running_containers = sorted(running_containers, key=attrgetter('number'))
containers_to_stop = sorted_running_containers[-num_to_stop:]
parallel_execute(
objects=containers_to_stop,
obj_callable=lambda c: c.stop(timeout=timeout),
msg_index=lambda c: c.name,
msg="Stopping"
)
self.remove_stopped()
def remove_stopped(self, **options):
containers = [c for c in self.containers(stopped=True) if not c.is_running]
parallel_execute(
objects=containers,
obj_callable=lambda c: c.remove(**options),
msg_index=lambda c: c.name,
msg="Removing"
)
def create_container(self,
one_off=False,
do_build=True,
previous_container=None,
number=None,
quiet=False,
**override_options):
"""
Create a container for this service. If the image doesn't exist, attempt to pull
it.
"""
self.ensure_image_exists(
do_build=do_build,
)
container_options = self._get_container_create_options(
override_options,
number or self._next_container_number(one_off=one_off),
one_off=one_off,
previous_container=previous_container,
)
if 'name' in container_options and not quiet:
log.info("Creating %s..." % container_options['name'])
return Container.create(self.client, **container_options)
def ensure_image_exists(self,
do_build=True):
try:
self.image()
return
except NoSuchImageError:
pass
if self.can_be_built():
if do_build:
self.build()
else:
raise NeedsBuildError(self)
else:
self.pull()
def image(self):
try:
return self.client.inspect_image(self.image_name)
except APIError as e:
if e.response.status_code == 404 and e.explanation and 'No such image' in str(e.explanation):
raise NoSuchImageError("Image '{}' not found".format(self.image_name))
else:
raise
@property
def image_name(self):
if self.can_be_built():
return self.full_name
else:
return self.options['image']
def convergence_plan(self,
allow_recreate=True,
force_recreate=False):
if force_recreate and not allow_recreate:
raise ValueError("force_recreate and allow_recreate are in conflict")
containers = self.containers(stopped=True)
if not containers:
return ConvergencePlan('create', [])
if not allow_recreate:
return ConvergencePlan('start', containers)
if force_recreate or self._containers_have_diverged(containers):
return ConvergencePlan('recreate', containers)
stopped = [c for c in containers if not c.is_running]
if stopped:
return ConvergencePlan('start', stopped)
return ConvergencePlan('noop', containers)
def _containers_have_diverged(self, containers):
config_hash = None
try:
config_hash = self.config_hash
except NoSuchImageError as e:
log.debug(
'Service %s has diverged: %s',
self.name, six.text_type(e),
)
return True
has_diverged = False
for c in containers:
container_config_hash = c.labels.get(LABEL_CONFIG_HASH, None)
if container_config_hash != config_hash:
log.debug(
'%s has diverged: %s != %s',
c.name, container_config_hash, config_hash,
)
has_diverged = True
return has_diverged
def execute_convergence_plan(self,
plan,
do_build=True,
timeout=DEFAULT_TIMEOUT):
(action, containers) = plan
if action == 'create':
container = self.create_container(
do_build=do_build,
)
self.start_container(container)
return [container]
elif action == 'recreate':
return [
self.recreate_container(
c,
timeout=timeout
)
for c in containers
]
elif action == 'start':
for c in containers:
self.start_container_if_stopped(c)
return containers
elif action == 'noop':
for c in containers:
log.info("%s is up-to-date" % c.name)
return containers
else:
raise Exception("Invalid action: {}".format(action))
def recreate_container(self,
container,
timeout=DEFAULT_TIMEOUT):
"""Recreate a container.
The original container is renamed to a temporary name so that data
volumes can be copied to the new container, before the original
container is removed.
"""
log.info("Recreating %s..." % container.name)
try:
container.stop(timeout=timeout)
except APIError as e:
if (e.response.status_code == 500
and e.explanation
and 'no such process' in str(e.explanation)):
pass
else:
raise
# Use a hopefully unique container name by prepending the short id
self.client.rename(
container.id,
'%s_%s' % (container.short_id, container.name))
new_container = self.create_container(
do_build=False,
previous_container=container,
number=container.labels.get(LABEL_CONTAINER_NUMBER),
quiet=True,
)
self.start_container(new_container)
container.remove()
return new_container
def start_container_if_stopped(self, container):
if container.is_running:
return container
else:
log.info("Starting %s..." % container.name)
return self.start_container(container)
def start_container(self, container):
container.start()
return container
def remove_duplicate_containers(self, timeout=DEFAULT_TIMEOUT):
for c in self.duplicate_containers():
log.info('Removing %s...' % c.name)
c.stop(timeout=timeout)
c.remove()
def duplicate_containers(self):
containers = sorted(
self.containers(stopped=True),
key=lambda c: c.get('Created'),
)
numbers = set()
for c in containers:
if c.number in numbers:
yield c
else:
numbers.add(c.number)
@property
def config_hash(self):
return json_hash(self.config_dict())
def config_dict(self):
return {
'options': self.options,
'image_id': self.image()['Id'],
}
def get_dependency_names(self):
net_name = self.get_net_name()
return (self.get_linked_names() +
self.get_volumes_from_names() +
([net_name] if net_name else []))
def get_linked_names(self):
return [s.name for (s, _) in self.links]
def get_volumes_from_names(self):
return [s.name for s in self.volumes_from if isinstance(s, Service)]
def get_net_name(self):
if isinstance(self.net, Service):
return self.net.name
else:
return
def get_container_name(self, number, one_off=False):
# TODO: Implement issue #652 here
return build_container_name(self.project, self.name, number, one_off)
# TODO: this would benefit from github.com/docker/docker/pull/11943
# to remove the need to inspect every container
def _next_container_number(self, one_off=False):
containers = filter(None, [
Container.from_ps(self.client, container)
for container in self.client.containers(
all=True,
filters={'label': self.labels(one_off=one_off)})
])
numbers = [c.number for c in containers]
return 1 if not numbers else max(numbers) + 1
def _get_links(self, link_to_self):
links = []
for service, link_name in self.links:
for container in service.containers():
links.append((container.name, link_name or service.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
if link_to_self:
for container in self.containers():
links.append((container.name, self.name))
links.append((container.name, container.name))
links.append((container.name, container.name_without_project))
for external_link in self.external_links:
if ':' not in external_link:
link_name = external_link
else:
external_link, link_name = external_link.split(':')
links.append((external_link, link_name))
return links
def _get_volumes_from(self):
volumes_from = []
for volume_source in self.volumes_from:
if isinstance(volume_source, Service):
containers = volume_source.containers(stopped=True)
if not containers:
volumes_from.append(volume_source.create_container().id)
else:
volumes_from.extend(map(attrgetter('id'), containers))
elif isinstance(volume_source, Container):
volumes_from.append(volume_source.id)
return volumes_from
def _get_net(self):
if not self.net:
return None
if isinstance(self.net, Service):
containers = self.net.containers()
if len(containers) > 0:
net = 'container:' + containers[0].id
else:
log.warning("Warning: Service %s is trying to use reuse the network stack "
"of another service that is not running." % (self.net.name))
net = None
elif isinstance(self.net, Container):
net = 'container:' + self.net.id
else:
net = self.net
return net
def _get_container_create_options(
self,
override_options,
number,
one_off=False,
previous_container=None):
add_config_hash = (not one_off and not override_options)
container_options = dict(
(k, self.options[k])
for k in DOCKER_CONFIG_KEYS if k in self.options)
container_options.update(override_options)
if self.custom_container_name() and not one_off:
container_options['name'] = self.custom_container_name()
else:
container_options['name'] = self.get_container_name(number, one_off)
if add_config_hash:
config_hash = self.config_hash
if 'labels' not in container_options:
container_options['labels'] = {}
container_options['labels'][LABEL_CONFIG_HASH] = config_hash
log.debug("Added config hash: %s" % config_hash)
if 'detach' not in container_options:
container_options['detach'] = True
# If a qualified hostname was given, split it into an
# unqualified hostname and a domainname unless domainname
# was also given explicitly. This matches the behavior of
# the official Docker CLI in that scenario.
if ('hostname' in container_options
and 'domainname' not in container_options
and '.' in container_options['hostname']):
parts = container_options['hostname'].partition('.')
container_options['hostname'] = parts[0]
container_options['domainname'] = parts[2]
if 'ports' in container_options or 'expose' in self.options:
ports = []
all_ports = container_options.get('ports', []) + self.options.get('expose', [])
for port_range in all_ports:
internal_range, _ = split_port(port_range)
for port in internal_range:
port = str(port)
if '/' in port:
port = tuple(port.split('/'))
ports.append(port)
container_options['ports'] = ports
override_options['binds'] = merge_volume_bindings(
container_options.get('volumes') or [],
previous_container)
if 'volumes' in container_options:
container_options['volumes'] = dict(
(parse_volume_spec(v).internal, {})
for v in container_options['volumes'])
container_options['environment'] = merge_environment(
self.options.get('environment'),
override_options.get('environment'))
if previous_container:
container_options['environment']['affinity:container'] = ('=' + previous_container.id)
container_options['image'] = self.image_name
container_options['labels'] = build_container_labels(
container_options.get('labels', {}),
self.labels(one_off=one_off),
number)
# Delete options which are only used when starting
for key in DOCKER_START_KEYS:
container_options.pop(key, None)
container_options['host_config'] = self._get_container_host_config(
override_options,
one_off=one_off)
return container_options
def _get_container_host_config(self, override_options, one_off=False):
options = dict(self.options, **override_options)
port_bindings = build_port_bindings(options.get('ports') or [])
privileged = options.get('privileged', False)
cap_add = options.get('cap_add', None)
cap_drop = options.get('cap_drop', None)
log_config = LogConfig(
type=options.get('log_driver', 'json-file'),
config=options.get('log_opt', None)
)
pid = options.get('pid', None)
security_opt = options.get('security_opt', None)
dns = options.get('dns', None)
if isinstance(dns, six.string_types):
dns = [dns]
dns_search = options.get('dns_search', None)
if isinstance(dns_search, six.string_types):
dns_search = [dns_search]
restart = parse_restart_spec(options.get('restart', None))
extra_hosts = build_extra_hosts(options.get('extra_hosts', None))
read_only = options.get('read_only', None)
devices = options.get('devices', None)
return create_host_config(
links=self._get_links(link_to_self=one_off),
port_bindings=port_bindings,
binds=options.get('binds'),
volumes_from=self._get_volumes_from(),
privileged=privileged,
network_mode=self._get_net(),
devices=devices,
dns=dns,
dns_search=dns_search,
restart_policy=restart,
cap_add=cap_add,
cap_drop=cap_drop,
mem_limit=options.get('mem_limit'),
memswap_limit=options.get('memswap_limit'),
log_config=log_config,
extra_hosts=extra_hosts,
read_only=read_only,
pid_mode=pid,
security_opt=security_opt
)
def build(self, no_cache=False):
log.info('Building %s...' % self.name)
path = six.binary_type(self.options['build'])
build_output = self.client.build(
path=path,
tag=self.image_name,
stream=True,
rm=True,
pull=False,
nocache=no_cache,
dockerfile=self.options.get('dockerfile', None),
)
try:
all_events = stream_output(build_output, sys.stdout)
except StreamOutputError as e:
raise BuildError(self, unicode(e))
# Ensure the HTTP connection is not reused for another
# streaming command, as the Docker daemon can sometimes
# complain about it
self.client.close()
image_id = None
for event in all_events:
if 'stream' in event:
match = re.search(r'Successfully built ([0-9a-f]+)', event.get('stream', ''))
if match:
image_id = match.group(1)
if image_id is None:
raise BuildError(self, event if all_events else 'Unknown')
return image_id
def can_be_built(self):
return 'build' in self.options
@property
def full_name(self):
"""
The tag to give to images built for this service.
"""
return '%s_%s' % (self.project, self.name)
def labels(self, one_off=False):
return [
'{0}={1}'.format(LABEL_PROJECT, self.project),
'{0}={1}'.format(LABEL_SERVICE, self.name),
'{0}={1}'.format(LABEL_ONE_OFF, "True" if one_off else "False")
]
def custom_container_name(self):
return self.options.get('container_name')
def specifies_host_port(self):
for port in self.options.get('ports', []):
if ':' in str(port):
return True
return False
def pull(self):
if 'image' not in self.options:
return
repo, tag, separator = parse_repository_tag(self.options['image'])
tag = tag or 'latest'
log.info('Pulling %s (%s%s%s)...' % (self.name, repo, separator, tag))
output = self.client.pull(
repo,
tag=tag,
stream=True,
)
stream_output(output, sys.stdout)
# Names
def build_container_name(project, service, number, one_off=False):
bits = [project, service]
if one_off:
bits.append('run')
return '_'.join(bits + [str(number)])
# Images
def parse_repository_tag(repo_path):
"""Splits image identification into base image path, tag/digest
and it's separator.
Example:
>>> parse_repository_tag('user/repo@sha256:digest')
('user/repo', 'sha256:digest', '@')
>>> parse_repository_tag('user/repo:v1')
('user/repo', 'v1', ':')
"""
tag_separator = ":"
digest_separator = "@"
if digest_separator in repo_path:
repo, tag = repo_path.rsplit(digest_separator, 1)
return repo, tag, digest_separator
repo, tag = repo_path, ""
if tag_separator in repo_path:
repo, tag = repo_path.rsplit(tag_separator, 1)
if "/" in tag:
repo, tag = repo_path, ""
return repo, tag, tag_separator
# Volumes
def merge_volume_bindings(volumes_option, previous_container):
"""Return a list of volume bindings for a container. Container data volumes
are replaced by those from the previous container.
"""
volume_bindings = dict(
build_volume_binding(parse_volume_spec(volume))
for volume in volumes_option or []
if ':' in volume)
if previous_container:
volume_bindings.update(
get_container_data_volumes(previous_container, volumes_option))
return volume_bindings.values()
def get_container_data_volumes(container, volumes_option):
"""Find the container data volumes that are in `volumes_option`, and return
a mapping of volume bindings for those volumes.
"""
volumes = []
volumes_option = volumes_option or []
container_volumes = container.get('Volumes') or {}
image_volumes = container.image_config['ContainerConfig'].get('Volumes') or {}
for volume in set(volumes_option + image_volumes.keys()):
volume = parse_volume_spec(volume)
# No need to preserve host volumes
if volume.external:
continue
volume_path = container_volumes.get(volume.internal)
# New volume, doesn't exist in the old container
if not volume_path:
continue
# Copy existing volume from old container
volume = volume._replace(external=volume_path)
volumes.append(build_volume_binding(volume))
return dict(volumes)
def build_volume_binding(volume_spec):
return volume_spec.internal, "{}:{}:{}".format(*volume_spec)
def parse_volume_spec(volume_config):
parts = volume_config.split(':')
if len(parts) > 3:
raise ConfigError("Volume %s has incorrect format, should be "
"external:internal[:mode]" % volume_config)
if len(parts) == 1:
external = None
internal = os.path.normpath(parts[0])
else:
external = os.path.normpath(parts[0])
internal = os.path.normpath(parts[1])
mode = parts[2] if len(parts) == 3 else 'rw'
return VolumeSpec(external, internal, mode)
# Labels
def build_container_labels(label_options, service_labels, number, one_off=False):
labels = label_options or {}
labels.update(label.split('=', 1) for label in service_labels)
labels[LABEL_CONTAINER_NUMBER] = str(number)
labels[LABEL_VERSION] = __version__
return labels
# Restart policy
def parse_restart_spec(restart_config):
if not restart_config:
return None
parts = restart_config.split(':')
if len(parts) > 2:
raise ConfigError("Restart %s has incorrect format, should be "
"mode[:max_retry]" % restart_config)
if len(parts) == 2:
name, max_retry_count = parts
else:
name, = parts
max_retry_count = 0
return {'Name': name, 'MaximumRetryCount': int(max_retry_count)}
# Extra hosts
def build_extra_hosts(extra_hosts_config):
if not extra_hosts_config:
return {}
if isinstance(extra_hosts_config, list):
extra_hosts_dict = {}
for extra_hosts_line in extra_hosts_config:
if not isinstance(extra_hosts_line, six.string_types):
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
host, ip = extra_hosts_line.split(':')
extra_hosts_dict.update({host.strip(): ip.strip()})
extra_hosts_config = extra_hosts_dict
if isinstance(extra_hosts_config, dict):
return extra_hosts_config
raise ConfigError(
"extra_hosts_config \"%s\" must be either a list of strings or a string->string mapping," %
extra_hosts_config
)
|
{
"content_hash": "db95c7617022fe8f704cdca7980d0b38",
"timestamp": "",
"source": "github",
"line_count": 946,
"max_line_length": 134,
"avg_line_length": 32.494714587737846,
"alnum_prop": 0.5690956408588159,
"repo_name": "bbirand/compose",
"id": "7df5618c55b27d9c50e07f49da3122e9675f10ff",
"size": "30740",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "compose/service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "315212"
},
{
"name": "Shell",
"bytes": "16679"
}
],
"symlink_target": ""
}
|
"""
Test WSGI basics and provide some helper functions for other WSGI tests.
"""
import testtools
import routes
import webob
from volt.common import wsgi
class WSGITest(testtools.TestCase):
def test_wsgi(self):
class Application(wsgi.Application):
"""Dummy application to test debug."""
def __call__(self, environ, start_response):
start_response("200", [("X-Test", "checking")])
return ['Test result']
application = wsgi.Debug(Application())
result = webob.Request.blank('/').get_response(application)
self.assertEqual(result.body, "Test result")
def test_router(self):
class Application(wsgi.Application):
"""Test application to call from router."""
def __call__(self, environ, start_response):
start_response("200", [])
return ['Router result']
class Router(wsgi.Router):
"""Test router."""
def __init__(self):
mapper = routes.Mapper()
mapper.connect("/test", controller=Application())
super(Router, self).__init__(mapper)
result = webob.Request.blank('/test').get_response(Router())
self.assertEqual(result.body, "Router result")
result = webob.Request.blank('/bad').get_response(Router())
self.assertNotEqual(result.body, "Router result")
|
{
"content_hash": "09cdd6fb9fde1b3d558f645dc84dca8c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 72,
"avg_line_length": 30.425531914893618,
"alnum_prop": 0.5895104895104896,
"repo_name": "vmthunder/volt",
"id": "3927f29d565279e4475858b374b225005472314c",
"size": "2200",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "volt/tests/api/test_wsgi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "479308"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('sponsors', '0063_auto_20211220_1422'),
]
operations = [
migrations.AddField(
model_name='sponsorshippackage',
name='slug',
field=models.SlugField(default='', help_text='Internal identifier used to reference this package.'),
),
]
|
{
"content_hash": "bfbcfbca6ffa8a130a88018d5a5f4bf3",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 112,
"avg_line_length": 25.5,
"alnum_prop": 0.6102941176470589,
"repo_name": "proevo/pythondotorg",
"id": "bf14023b4cccd5f287f690af8cad69f700d45c07",
"size": "458",
"binary": false,
"copies": "3",
"ref": "refs/heads/dependabot/pip/django-allauth-0.51.0",
"path": "sponsors/migrations/0064_sponsorshippackage_slug.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "711916"
},
{
"name": "JavaScript",
"bytes": "314514"
},
{
"name": "Makefile",
"bytes": "6811"
},
{
"name": "Python",
"bytes": "1448691"
},
{
"name": "Ruby",
"bytes": "218314"
},
{
"name": "Shell",
"bytes": "6730"
}
],
"symlink_target": ""
}
|
import sys
from cx_Freeze import setup, Executable
exe = Executable(
script="MarioMakerLevelsBot.py",
initScript=None,
base='Win32GUI',
targetDir="dist",
targetName="MarioMakerLevelsBot.exe",
compress=True,
copyDependentFiles=True,
appendScriptToExe=True,
appendScriptToLibrary=False,
icon="ui/MarioMakerLevelsBot.ico", # TODO: add an icon?
)
setup(
version=1.0,
name="MarioMakerLevelsBot",
author="Raphaël Lejolivet",
description="Pulls levels from Twitch chat into a pretty interface",
options = {"build_exe": {"path": sys.path,
"append_script_to_exe":False,
"build_exe":"dist/bin",
"compressed":True,
"copy_dependent_files":True,
"create_shared_zip":True,
"include_in_shared_zip":True,
"optimize":2,
}
},
executables = [exe]
)
|
{
"content_hash": "b8537fc29da3e98073c37159565edfa1",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 72,
"avg_line_length": 30.085714285714285,
"alnum_prop": 0.5251661918328585,
"repo_name": "RLejolivet/MarioMakerLevelsBot",
"id": "9391f6b3399a7eeb565a73375f2405893501fc10",
"size": "1056",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "MarioMakerLevelsBot/MarioMakerLevelsBot/setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62527"
}
],
"symlink_target": ""
}
|
import asyncio
import discord
from datetime import datetime
from discord.ext import commands
from shutil import copyfile
import time
import json
import os
import re
from Cogs import DisplayName
from Cogs import Nullify
class Welcome:
def __init__(self, bot, settings):
self.bot = bot
self.settings = settings
self.regexUserName = re.compile(r"\[\[[user]+\]\]", re.IGNORECASE)
self.regexUserPing = re.compile(r"\[\[[atuser]+\]\]", re.IGNORECASE)
self.regexServer = re.compile(r"\[\[[server]+\]\]", re.IGNORECASE)
async def onjoin(self, member, server):
# Welcome
welcomeChannel = self.settings.getServerStat(server, "WelcomeChannel")
if welcomeChannel:
for channel in server.channels:
if channel.id == welcomeChannel:
welcomeChannel = channel
break
if welcomeChannel:
await self._welcome(member, server, welcomeChannel)
else:
await self._welcome(member, server)
async def onleave(self, member, server):
# Goodbye
if not server in self.bot.servers:
# We're not on this server - and can't say anything there
return
welcomeChannel = self.settings.getServerStat(server, "WelcomeChannel")
if welcomeChannel:
for channel in server.channels:
if channel.id == welcomeChannel:
welcomeChannel = channel
break
if welcomeChannel:
await self._goodbye(member, server, welcomeChannel)
else:
await self._goodbye(member, server)
@commands.command(pass_context=True)
async def setwelcome(self, ctx, *, message = None):
"""Sets the welcome message for your server (bot-admin only). [[user]] = user name, [[atuser]] = user mention, [[server]] = server name"""
isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.message.server, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if aRole['ID'] == role.id:
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await self.bot.send_message(ctx.message.channel, 'You do not have sufficient privileges to access this command.')
return
if message == None:
self.settings.setServerStat(ctx.message.server, "Welcome", None)
await self.bot.send_message(ctx.message.channel, 'Welcome message removed!')
return
self.settings.setServerStat(ctx.message.server, "Welcome", message)
await self.bot.send_message(ctx.message.channel, 'Welcome message updated!\n\nHere\'s a preview:')
await self._welcome(ctx.message.author, ctx.message.server, ctx.message.channel)
@commands.command(pass_context=True)
async def testwelcome(self, ctx, *, member = None):
"""Prints the current welcome message (bot-admin only)."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.server, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.message.server, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if aRole['ID'] == role.id:
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await self.bot.send_message(ctx.message.channel, 'You do not have sufficient privileges to access this command.')
return
if member == None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.server)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await self.bot.send_message(ctx.message.channel, msg)
return
# Here we have found a member, and stuff.
# Let's make sure we have a message
message = self.settings.getServerStat(ctx.message.server, "Welcome")
if message == None:
await self.bot.send_message(ctx.message.channel, 'Welcome message not setup. You can do so with the `{}setwelcome [message]` command.'.format(ctx.prefix))
return
await self._welcome(member, ctx.message.server, ctx.message.channel)
# Print the welcome channel
welcomeChannel = self.settings.getServerStat(ctx.message.server, "WelcomeChannel")
if welcomeChannel:
for channel in ctx.message.server.channels:
if channel.id == welcomeChannel:
welcomeChannel = channel
break
if welcomeChannel:
msg = 'The current welcome channel is **{}**.'.format(welcomeChannel.name)
else:
msg = 'The current welcome channel is the server\'s default channel (**{}**).'.format(ctx.message.server.default_channel.name)
await self.bot.send_message(ctx.message.channel, msg)
@commands.command(pass_context=True)
async def setgoodbye(self, ctx, *, message = None):
"""Sets the goodbye message for your server (bot-admin only). [[user]] = user name, [[atuser]] = user mention, [[server]] = server name"""
isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.message.server, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if aRole['ID'] == role.id:
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await self.bot.send_message(ctx.message.channel, 'You do not have sufficient privileges to access this command.')
return
if message == None:
self.settings.setServerStat(ctx.message.server, "Goodbye", None)
await self.bot.send_message(ctx.message.channel, 'Goodbye message removed!')
return
self.settings.setServerStat(ctx.message.server, "Goodbye", message)
await self.bot.send_message(ctx.message.channel, 'Goodbye message updated!\n\nHere\'s a preview:')
await self._goodbye(ctx.message.author, ctx.message.server, ctx.message.channel)
@commands.command(pass_context=True)
async def testgoodbye(self, ctx, *, member = None):
"""Prints the current goodbye message (bot-admin only)."""
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(ctx.message.server, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.message.server, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if aRole['ID'] == role.id:
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await self.bot.send_message(ctx.message.channel, 'You do not have sufficient privileges to access this command.')
return
if member == None:
member = ctx.message.author
if type(member) is str:
memberName = member
member = DisplayName.memberForName(memberName, ctx.message.server)
if not member:
msg = 'I couldn\'t find *{}*...'.format(memberName)
# Check for suppress
if suppress:
msg = Nullify.clean(msg)
await self.bot.send_message(ctx.message.channel, msg)
return
# Here we have found a member, and stuff.
# Let's make sure we have a message
message = self.settings.getServerStat(ctx.message.server, "Goodbye")
if message == None:
await self.bot.send_message(ctx.message.channel, 'Goodbye message not setup. You can do so with the `{}setgoodbye [message]` command.'.format(ctx.prefix))
return
await self._goodbye(member, ctx.message.server, ctx.message.channel)
# Print the goodbye channel
welcomeChannel = self.settings.getServerStat(ctx.message.server, "WelcomeChannel")
if welcomeChannel:
for channel in ctx.message.server.channels:
if channel.id == welcomeChannel:
welcomeChannel = channel
break
if welcomeChannel:
msg = 'The current goodbye channel is **{}**.'.format(welcomeChannel.name)
else:
msg = 'The current goodbye channel is the server\'s default channel (**{}**).'.format(ctx.message.server.default_channel.name)
await self.bot.send_message(ctx.message.channel, msg)
async def _welcome(self, member, server, channel = None):
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
message = self.settings.getServerStat(server, "Welcome")
if message == None:
return
# Let's regex and replace [[user]] [[atuser]] and [[server]]
message = re.sub(self.regexUserName, "{}".format(DisplayName.name(member)), message)
message = re.sub(self.regexUserPing, "{}".format(member.mention), message)
message = re.sub(self.regexServer, "{}".format(server.name), message)
if suppress:
message = Nullify.clean(message)
if channel:
await self.bot.send_message(channel, message)
else:
await self.bot.send_message(server.default_channel, message)
async def _goodbye(self, member, server, channel = None):
# Check if we're suppressing @here and @everyone mentions
if self.settings.getServerStat(server, "SuppressMentions").lower() == "yes":
suppress = True
else:
suppress = False
message = self.settings.getServerStat(server, "Goodbye")
if message == None:
return
# Let's regex and replace [[user]] [[atuser]] and [[server]]
message = re.sub(self.regexUserName, "{}".format(DisplayName.name(member)), message)
message = re.sub(self.regexUserPing, "{}".format(member.mention), message)
message = re.sub(self.regexServer, "{}".format(server.name), message)
if suppress:
message = Nullify.clean(message)
if channel:
await self.bot.send_message(channel, message)
else:
await self.bot.send_message(server.default_channel, message)
@commands.command(pass_context=True)
async def setwelcomechannel(self, ctx, *, channel : discord.Channel = None):
"""Sets the channel for the welcome and goodbye messages (bot-admin only)."""
isAdmin = ctx.message.author.permissions_in(ctx.message.channel).administrator
if not isAdmin:
checkAdmin = self.settings.getServerStat(ctx.message.server, "AdminArray")
for role in ctx.message.author.roles:
for aRole in checkAdmin:
# Get the role that corresponds to the id
if aRole['ID'] == role.id:
isAdmin = True
# Only allow admins to change server stats
if not isAdmin:
await self.bot.send_message(ctx.message.channel, 'You do not have sufficient privileges to access this command.')
return
if channel == None:
self.settings.setServerStat(ctx.message.server, "WelcomeChannel", "")
msg = 'Welcome and goodbye messages will be displayed in the default channel (**{}**).'.format(ctx.message.server.default_channel.name)
await self.bot.send_message(ctx.message.channel, msg)
return
if type(channel) is str:
try:
role = discord.utils.get(message.server.channels, name=role)
except:
print("That channel does not exist")
return
# If we made it this far - then we can add it
self.settings.setServerStat(ctx.message.server, "WelcomeChannel", channel.id)
msg = 'Welcome and goodbye messages will be displayed in **{}**.'.format(channel.name)
await self.bot.send_message(ctx.message.channel, msg)
@setwelcomechannel.error
async def setwelcomechannel_error(self, ctx, error):
# do stuff
msg = 'setwelcomechannel Error: {}'.format(ctx)
await self.bot.say(msg)
|
{
"content_hash": "514001a7d419e1a034d00ae6c9d85ef3",
"timestamp": "",
"source": "github",
"line_count": 304,
"max_line_length": 167,
"avg_line_length": 45.098684210526315,
"alnum_prop": 0.6064186725018235,
"repo_name": "Mercurial/CorpBot.py",
"id": "832fdc8a41354e5c908ee3395825b62cc16adbf3",
"size": "13710",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Cogs/Welcome.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2708"
},
{
"name": "Python",
"bytes": "575793"
},
{
"name": "Shell",
"bytes": "2717"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.views.generic import RedirectView
urlpatterns = patterns('',
url(r'^admin/', include(admin.site.urls)),
url(r'^executions/week/(?P<yyyy_mm_dd>\d{4}-\d{2}-\d{2})/$', 'batch_apps.views.one_week_view', name='weekly_date'),
url(r'^executions/week/$', 'batch_apps.views.one_week_view', name='weekly_default'),
url(r'^executions/day/(?P<yyyy_mm_dd>\d{4}-\d{2}-\d{2})/$', 'batch_apps.views.specific_date', name='daily_date'),
url(r'^executions/day/$', 'batch_apps.views.specific_date', name='daily_default'),
url(r'^executions/$', RedirectView.as_view(pattern_name='weekly_default', permanent=False), name='index'),
url(r'^$', RedirectView.as_view(pattern_name='index', permanent=False), name='superindex'),
url(r'^maintenance/$', 'batch_apps.views.maintenance', name='maintenance'),
url(r'^strip$', 'batch_apps.views.strip', name='strip'),
url(r'^vacuum$', 'batch_apps.views.vacuum', name='vacuum'),
)
|
{
"content_hash": "0e085b0e9d9a0eaa28929404637c32ff",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 138,
"avg_line_length": 73.41176470588235,
"alnum_prop": 0.5584935897435898,
"repo_name": "azam-a/batcher",
"id": "4a321edbcefd17691becfddb76aeb7864a039f3a",
"size": "1248",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "batcher/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "262"
},
{
"name": "CSS",
"bytes": "4537"
},
{
"name": "HTML",
"bytes": "7048"
},
{
"name": "JavaScript",
"bytes": "63533"
},
{
"name": "Python",
"bytes": "99898"
}
],
"symlink_target": ""
}
|
import copy
import string
import collections
import contextlib
@contextlib.contextmanager
def as_handle(handleish, mode='r', **kwargs):
"""Open handleish as file.
Stolen from Biopython
"""
if isinstance(handleish, basestring):
with open(handleish, mode, **kwargs) as fp:
yield fp
else:
yield handleish
# for generating 'safe' filenames from identifiers
cleanup_table = string.maketrans('/*|><+ ','_____p_')
def cleanup_id(identifier):
return identifier.translate(cleanup_table)
class nesteddict(collections.defaultdict):
"""Nested dictionary structure.
Based on Stack Overflow question 635483
"""
def __init__(self,default=None):
if default == None:
collections.defaultdict.__init__(self, nesteddict)
else:
collections.defaultdict.__init__(self, default)
self.locked = False
def lock(self):
# self.default_factory = raiseKeyError
self.default_factory = None
self.locked = True
for value in self.itervalues():
if isinstance(value, nesteddict):
value.lock()
def unlock(self):
self.default_factory = nesteddict
self.locked = False
for value in self.itervalues():
if isinstance(value, nesteddict):
value.unlock()
def islocked(self):
return self.locked
def todict(self):
raise NotImplementedError
for (key,val) in self.iteritems():
if isinstance(val,nesteddict):
val.todict()
self[key] = dict(val)
self = dict(self)
@staticmethod
def asdict(d):
d = copy.deepcopy(d)
for (key,val) in d.iteritems():
if isinstance(val,nesteddict):
d[key] = nesteddict.asdict(val)
return dict(d)
def nested_setdefault(self,keylist,default):
curr_dict = self
for key in keylist[:-1]:
curr_dict = curr_dict[key]
key = keylist[-1]
return curr_dict.setdefault(key,default)
def nested_get(self,keylist,default):
curr_dict = self
for key in keylist[:-1]:
curr_dict = curr_dict[key]
key = keylist[-1]
return curr_dict.get(key,default)
def nested_assign(self,keylist,val):
curr_dict = self
for key in keylist[:-1]:
curr_dict = curr_dict[key]
key = keylist[-1]
curr_dict[key] = val
return self
def walk(self):
for (key,value) in self.iteritems():
if isinstance(value, nesteddict):
for tup in value.walk():
yield (key,) + tup
else:
yield (key,value)
# these functions below implement special cases of nesteddict, where the
# deepest-level dict is of a particular type (e.g., int for counter, set
# for uniq objects, etc.)
#
# These functions could be implemented with nested_setdefault and
# nested_get, but would be less efficient since they would have to
# traverse the dict structure more times.
def nested_increment(self,keylist,increment=1):
curr_dict = self
for key in keylist[:-1]:
curr_dict = curr_dict[key]
key = keylist[-1]
curr_dict[key] = curr_dict.get(key,0) + increment
def nested_add(self,keylist,obj):
curr_dict = self
for key in keylist[:-1]:
curr_dict = curr_dict[key]
key = keylist[-1]
curr_dict.setdefault(key,set()).add(obj)
# class ModuleWrapper(object):
# """Wrap a module to allow user-defined __getattr__
#
# see http://stackoverflow.com/questions/2447353/getattr-on-a-module
# """
# def __init__(self, module, usergetattr):
# self.module = module
# self.usergetattr = usergetattr
#
# def __getattr__(self, name):
# return self.usergetattr(self,name)
|
{
"content_hash": "6c0a82643c0449d9eddad05886368e89",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 76,
"avg_line_length": 28.735714285714284,
"alnum_prop": 0.5781754909271688,
"repo_name": "churchlab/ulutil",
"id": "c9d0f0bc3f15e562663c214aad23c4b40d23cc58",
"size": "4600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ulutil/pyutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "155413"
}
],
"symlink_target": ""
}
|
import pydcop
try:
# get 'player' service of 'amarok' aplication
playerService = pydcop.DCOPObject('amarok', 'player')
# call service methods for getting song information
info = dict(
title=playerService.title(),
artist=playerService.artist(),
album=playerService.album()
)
print '%(artist)s - %(title)s (%(album)s)' % info
except RuntimeError, e:
print 'Amarok is not running.'
# sample output:
# Mercyful Fate - A Dangerous Meeting (Don't Break The Oath)
|
{
"content_hash": "455d83e3e65781f5ecc24d005b173b74",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 60,
"avg_line_length": 25.7,
"alnum_prop": 0.6595330739299611,
"repo_name": "ActiveState/code",
"id": "00f79d3c28face3d7fdae72bba565cd0b1a68cc8",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "recipes/Python/576878_Accessing_DCOP_services/recipe-576878.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "35894"
},
{
"name": "C",
"bytes": "56048"
},
{
"name": "C++",
"bytes": "90880"
},
{
"name": "HTML",
"bytes": "11656"
},
{
"name": "Java",
"bytes": "57468"
},
{
"name": "JavaScript",
"bytes": "181218"
},
{
"name": "PHP",
"bytes": "250144"
},
{
"name": "Perl",
"bytes": "37296"
},
{
"name": "Perl 6",
"bytes": "9914"
},
{
"name": "Python",
"bytes": "17387779"
},
{
"name": "Ruby",
"bytes": "40233"
},
{
"name": "Shell",
"bytes": "190732"
},
{
"name": "Tcl",
"bytes": "674650"
}
],
"symlink_target": ""
}
|
import string
from header_common import *
from module_info import *
from module_strings import *
from process_common import *
def save_strings(strings):
ofile = open(export_dir + "strings.txt","w")
ofile.write("stringsfile version 1\n")
ofile.write("%d\n"%len(strings))
for i_string in xrange(len(strings)):
str = strings[i_string]
ofile.write("str_%s %s\n"%(convert_to_identifier(str[0]),replace_spaces(str[1])))
ofile.close()
def save_python_header():
ofile = open("./ID_strings.py","w")
for i_string in xrange(len(strings)):
ofile.write("str_%s = %d\n"%(convert_to_identifier(strings[i_string][0]),i_string))
ofile.write("\n\n")
ofile.close()
print "Exporting strings..."
save_python_header()
save_strings(strings)
|
{
"content_hash": "4836f278dd026fc7fc5c5e19a7e27f94",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 87,
"avg_line_length": 28.962962962962962,
"alnum_prop": 0.6585677749360613,
"repo_name": "nycz/useful-sisters",
"id": "0bca8645f251a705fb06f712e0c983d4ba1736b8",
"size": "782",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "process_strings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8309324"
},
{
"name": "Shell",
"bytes": "1950"
}
],
"symlink_target": ""
}
|
from classes import *
from featutil import *
from features import *
|
{
"content_hash": "b5b82275747f607804ba92950ddc1b9d",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 22,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.7794117647058824,
"repo_name": "QuantSoftware/QuantSoftwareToolkit",
"id": "aca4b1e71d988c40e26803129ae2c4ea4e68491f",
"size": "68",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "QSTK/qstkfeat/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "5708"
},
{
"name": "CSS",
"bytes": "127790"
},
{
"name": "Groff",
"bytes": "36352"
},
{
"name": "HTML",
"bytes": "18435650"
},
{
"name": "Java",
"bytes": "8096"
},
{
"name": "JavaScript",
"bytes": "21455"
},
{
"name": "Makefile",
"bytes": "2590"
},
{
"name": "Python",
"bytes": "1667488"
},
{
"name": "Shell",
"bytes": "9473"
},
{
"name": "TeX",
"bytes": "1018533"
}
],
"symlink_target": ""
}
|
"""
Functions to operate on polynomials.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['poly', 'roots', 'polyint', 'polyder', 'polyadd',
'polysub', 'polymul', 'polydiv', 'polyval', 'poly1d',
'polyfit', 'RankWarning']
import re
import warnings
import numpy.core.numeric as NX
from numpy.core import (isscalar, abs, finfo, atleast_1d, hstack, dot, array,
ones)
from numpy.lib.twodim_base import diag, vander
from numpy.lib.function_base import trim_zeros
from numpy.lib.type_check import iscomplex, real, imag, mintypecode
from numpy.linalg import eigvals, lstsq, inv
class RankWarning(UserWarning):
"""
Issued by `polyfit` when the Vandermonde matrix is rank deficient.
For more information, a way to suppress the warning, and an example of
`RankWarning` being issued, see `polyfit`.
"""
pass
def poly(seq_of_zeros):
"""
Find the coefficients of a polynomial with the given sequence of roots.
Returns the coefficients of the polynomial whose leading coefficient
is one for the given sequence of zeros (multiple roots must be included
in the sequence as many times as their multiplicity; see Examples).
A square matrix (or array, which will be treated as a matrix) can also
be given, in which case the coefficients of the characteristic polynomial
of the matrix are returned.
Parameters
----------
seq_of_zeros : array_like, shape (N,) or (N, N)
A sequence of polynomial roots, or a square array or matrix object.
Returns
-------
c : ndarray
1D array of polynomial coefficients from highest to lowest degree:
``c[0] * x**(N) + c[1] * x**(N-1) + ... + c[N-1] * x + c[N]``
where c[0] always equals 1.
Raises
------
ValueError
If input is the wrong shape (the input must be a 1-D or square
2-D array).
See Also
--------
polyval : Compute polynomial values.
roots : Return the roots of a polynomial.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
Specifying the roots of a polynomial still leaves one degree of
freedom, typically represented by an undetermined leading
coefficient. [1]_ In the case of this function, that coefficient -
the first one in the returned array - is always taken as one. (If
for some reason you have one other point, the only automatic way
presently to leverage that information is to use ``polyfit``.)
The characteristic polynomial, :math:`p_a(t)`, of an `n`-by-`n`
matrix **A** is given by
:math:`p_a(t) = \\mathrm{det}(t\\, \\mathbf{I} - \\mathbf{A})`,
where **I** is the `n`-by-`n` identity matrix. [2]_
References
----------
.. [1] M. Sullivan and M. Sullivan, III, "Algebra and Trignometry,
Enhanced With Graphing Utilities," Prentice-Hall, pg. 318, 1996.
.. [2] G. Strang, "Linear Algebra and Its Applications, 2nd Edition,"
Academic Press, pg. 182, 1980.
Examples
--------
Given a sequence of a polynomial's zeros:
>>> np.poly((0, 0, 0)) # Multiple root example
array([1, 0, 0, 0])
The line above represents z**3 + 0*z**2 + 0*z + 0.
>>> np.poly((-1./2, 0, 1./2))
array([ 1. , 0. , -0.25, 0. ])
The line above represents z**3 - z/4
>>> np.poly((np.random.random(1.)[0], 0, np.random.random(1.)[0]))
array([ 1. , -0.77086955, 0.08618131, 0. ]) #random
Given a square array object:
>>> P = np.array([[0, 1./3], [-1./2, 0]])
>>> np.poly(P)
array([ 1. , 0. , 0.16666667])
Or a square matrix object:
>>> np.poly(np.matrix(P))
array([ 1. , 0. , 0.16666667])
Note how in all cases the leading coefficient is always 1.
"""
seq_of_zeros = atleast_1d(seq_of_zeros)
sh = seq_of_zeros.shape
if len(sh) == 2 and sh[0] == sh[1] and sh[0] != 0:
seq_of_zeros = eigvals(seq_of_zeros)
elif len(sh) == 1:
dt = seq_of_zeros.dtype
# Let object arrays slip through, e.g. for arbitrary precision
if dt != object:
seq_of_zeros = seq_of_zeros.astype(mintypecode(dt.char))
else:
raise ValueError("input must be 1d or non-empty square 2d array.")
if len(seq_of_zeros) == 0:
return 1.0
dt = seq_of_zeros.dtype
a = ones((1,), dtype=dt)
for k in range(len(seq_of_zeros)):
a = NX.convolve(a, array([1, -seq_of_zeros[k]], dtype=dt),
mode='full')
if issubclass(a.dtype.type, NX.complexfloating):
# if complex roots are all complex conjugates, the roots are real.
roots = NX.asarray(seq_of_zeros, complex)
if NX.all(NX.sort(roots) == NX.sort(roots.conjugate())):
a = a.real.copy()
return a
def roots(p):
"""
Return the roots of a polynomial with coefficients given in p.
The values in the rank-1 array `p` are coefficients of a polynomial.
If the length of `p` is n+1 then the polynomial is described by::
p[0] * x**n + p[1] * x**(n-1) + ... + p[n-1]*x + p[n]
Parameters
----------
p : array_like
Rank-1 array of polynomial coefficients.
Returns
-------
out : ndarray
An array containing the roots of the polynomial.
Raises
------
ValueError
When `p` cannot be converted to a rank-1 array.
See also
--------
poly : Find the coefficients of a polynomial with a given sequence
of roots.
polyval : Compute polynomial values.
polyfit : Least squares polynomial fit.
poly1d : A one-dimensional polynomial class.
Notes
-----
The algorithm relies on computing the eigenvalues of the
companion matrix [1]_.
References
----------
.. [1] R. A. Horn & C. R. Johnson, *Matrix Analysis*. Cambridge, UK:
Cambridge University Press, 1999, pp. 146-7.
Examples
--------
>>> coeff = [3.2, 2, 1]
>>> np.roots(coeff)
array([-0.3125+0.46351241j, -0.3125-0.46351241j])
"""
# If input is scalar, this makes it an array
p = atleast_1d(p)
if p.ndim != 1:
raise ValueError("Input must be a rank-1 array.")
# find non-zero array entries
non_zero = NX.nonzero(NX.ravel(p))[0]
# Return an empty array if polynomial is all zeros
if len(non_zero) == 0:
return NX.array([])
# find the number of trailing zeros -- this is the number of roots at 0.
trailing_zeros = len(p) - non_zero[-1] - 1
# strip leading and trailing zeros
p = p[int(non_zero[0]):int(non_zero[-1])+1]
# casting: if incoming array isn't floating point, make it floating point.
if not issubclass(p.dtype.type, (NX.floating, NX.complexfloating)):
p = p.astype(float)
N = len(p)
if N > 1:
# build companion matrix and find its eigenvalues (the roots)
A = diag(NX.ones((N-2,), p.dtype), -1)
A[0,:] = -p[1:] / p[0]
roots = eigvals(A)
else:
roots = NX.array([])
# tack any zeros onto the back of the array
roots = hstack((roots, NX.zeros(trailing_zeros, roots.dtype)))
return roots
def polyint(p, m=1, k=None):
"""
Return an antiderivative (indefinite integral) of a polynomial.
The returned order `m` antiderivative `P` of polynomial `p` satisfies
:math:`\\frac{d^m}{dx^m}P(x) = p(x)` and is defined up to `m - 1`
integration constants `k`. The constants determine the low-order
polynomial part
.. math:: \\frac{k_{m-1}}{0!} x^0 + \\ldots + \\frac{k_0}{(m-1)!}x^{m-1}
of `P` so that :math:`P^{(j)}(0) = k_{m-j-1}`.
Parameters
----------
p : array_like or poly1d
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of the antiderivative. (Default: 1)
k : list of `m` scalars or scalar, optional
Integration constants. They are given in the order of integration:
those corresponding to highest-order terms come first.
If ``None`` (default), all constants are assumed to be zero.
If `m = 1`, a single scalar can be given instead of a list.
See Also
--------
polyder : derivative of a polynomial
poly1d.integ : equivalent method
Examples
--------
The defining property of the antiderivative:
>>> p = np.poly1d([1,1,1])
>>> P = np.polyint(p)
>>> P
poly1d([ 0.33333333, 0.5 , 1. , 0. ])
>>> np.polyder(P) == p
True
The integration constants default to zero, but can be specified:
>>> P = np.polyint(p, 3)
>>> P(0)
0.0
>>> np.polyder(P)(0)
0.0
>>> np.polyder(P, 2)(0)
0.0
>>> P = np.polyint(p, 3, k=[6,5,3])
>>> P
poly1d([ 0.01666667, 0.04166667, 0.16666667, 3. , 5. , 3. ])
Note that 3 = 6 / 2!, and that the constants are given in the order of
integrations. Constant of the highest-order polynomial term comes first:
>>> np.polyder(P, 2)(0)
6.0
>>> np.polyder(P, 1)(0)
5.0
>>> P(0)
3.0
"""
m = int(m)
if m < 0:
raise ValueError("Order of integral must be positive (see polyder)")
if k is None:
k = NX.zeros(m, float)
k = atleast_1d(k)
if len(k) == 1 and m > 1:
k = k[0]*NX.ones(m, float)
if len(k) < m:
raise ValueError(
"k must be a scalar or a rank-1 array of length 1 or >m.")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
if m == 0:
if truepoly:
return poly1d(p)
return p
else:
# Note: this must work also with object and integer arrays
y = NX.concatenate((p.__truediv__(NX.arange(len(p), 0, -1)), [k[0]]))
val = polyint(y, m - 1, k=k[1:])
if truepoly:
return poly1d(val)
return val
def polyder(p, m=1):
"""
Return the derivative of the specified order of a polynomial.
Parameters
----------
p : poly1d or sequence
Polynomial to differentiate.
A sequence is interpreted as polynomial coefficients, see `poly1d`.
m : int, optional
Order of differentiation (default: 1)
Returns
-------
der : poly1d
A new polynomial representing the derivative.
See Also
--------
polyint : Anti-derivative of a polynomial.
poly1d : Class for one-dimensional polynomials.
Examples
--------
The derivative of the polynomial :math:`x^3 + x^2 + x^1 + 1` is:
>>> p = np.poly1d([1,1,1,1])
>>> p2 = np.polyder(p)
>>> p2
poly1d([3, 2, 1])
which evaluates to:
>>> p2(2.)
17.0
We can verify this, approximating the derivative with
``(f(x + h) - f(x))/h``:
>>> (p(2. + 0.001) - p(2.)) / 0.001
17.007000999997857
The fourth-order derivative of a 3rd-order polynomial is zero:
>>> np.polyder(p, 2)
poly1d([6, 2])
>>> np.polyder(p, 3)
poly1d([6])
>>> np.polyder(p, 4)
poly1d([ 0.])
"""
m = int(m)
if m < 0:
raise ValueError("Order of derivative must be positive (see polyint)")
truepoly = isinstance(p, poly1d)
p = NX.asarray(p)
n = len(p) - 1
y = p[:-1] * NX.arange(n, 0, -1)
if m == 0:
val = p
else:
val = polyder(y, m - 1)
if truepoly:
val = poly1d(val)
return val
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Least squares polynomial fit.
Fit a polynomial ``p(x) = p[0] * x**deg + ... + p[deg]`` of degree `deg`
to points `(x, y)`. Returns a vector of coefficients `p` that minimises
the squared error.
Parameters
----------
x : array_like, shape (M,)
x-coordinates of the M sample points ``(x[i], y[i])``.
y : array_like, shape (M,) or (M, K)
y-coordinates of the sample points. Several data sets of sample
points sharing the same x-coordinates can be fitted at once by
passing in a 2D-array that contains one dataset per column.
deg : int
Degree of the fitting polynomial
rcond : float, optional
Relative condition number of the fit. Singular values smaller than
this relative to the largest singular value will be ignored. The
default value is len(x)*eps, where eps is the relative precision of
the float type, about 2e-16 in most cases.
full : bool, optional
Switch determining nature of return value. When it is False (the
default) just the coefficients are returned, when True diagnostic
information from the singular value decomposition is also returned.
w : array_like, shape (M,), optional
Weights to apply to the y-coordinates of the sample points. For
gaussian uncertainties, use 1/sigma (not 1/sigma**2).
cov : bool, optional
Return the estimate and the covariance matrix of the estimate
If full is True, then cov is not returned.
Returns
-------
p : ndarray, shape (deg + 1,) or (deg + 1, K)
Polynomial coefficients, highest power first. If `y` was 2-D, the
coefficients for `k`-th data set are in ``p[:,k]``.
residuals, rank, singular_values, rcond
Present only if `full` = True. Residuals of the least-squares fit,
the effective rank of the scaled Vandermonde coefficient matrix,
its singular values, and the specified value of `rcond`. For more
details, see `linalg.lstsq`.
V : ndarray, shape (M,M) or (M,M,K)
Present only if `full` = False and `cov`=True. The covariance
matrix of the polynomial coefficient estimates. The diagonal of
this matrix are the variance estimates for each coefficient. If y
is a 2-D array, then the covariance matrix for the `k`-th data set
are in ``V[:,:,k]``
Warns
-----
RankWarning
The rank of the coefficient matrix in the least-squares fit is
deficient. The warning is only raised if `full` = False.
The warnings can be turned off by
>>> import warnings
>>> warnings.simplefilter('ignore', np.RankWarning)
See Also
--------
polyval : Compute polynomial values.
linalg.lstsq : Computes a least-squares fit.
scipy.interpolate.UnivariateSpline : Computes spline fits.
Notes
-----
The solution minimizes the squared error
.. math ::
E = \\sum_{j=0}^k |p(x_j) - y_j|^2
in the equations::
x[0]**n * p[0] + ... + x[0] * p[n-1] + p[n] = y[0]
x[1]**n * p[0] + ... + x[1] * p[n-1] + p[n] = y[1]
...
x[k]**n * p[0] + ... + x[k] * p[n-1] + p[n] = y[k]
The coefficient matrix of the coefficients `p` is a Vandermonde matrix.
`polyfit` issues a `RankWarning` when the least-squares fit is badly
conditioned. This implies that the best fit is not well-defined due
to numerical error. The results may be improved by lowering the polynomial
degree or by replacing `x` by `x` - `x`.mean(). The `rcond` parameter
can also be set to a value smaller than its default, but the resulting
fit may be spurious: including contributions from the small singular
values can add numerical noise to the result.
Note that fitting polynomial coefficients is inherently badly conditioned
when the degree of the polynomial is large or the interval of sample points
is badly centered. The quality of the fit should always be checked in these
cases. When polynomial fits are not satisfactory, splines may be a good
alternative.
References
----------
.. [1] Wikipedia, "Curve fitting",
http://en.wikipedia.org/wiki/Curve_fitting
.. [2] Wikipedia, "Polynomial interpolation",
http://en.wikipedia.org/wiki/Polynomial_interpolation
Examples
--------
>>> x = np.array([0.0, 1.0, 2.0, 3.0, 4.0, 5.0])
>>> y = np.array([0.0, 0.8, 0.9, 0.1, -0.8, -1.0])
>>> z = np.polyfit(x, y, 3)
>>> z
array([ 0.08703704, -0.81349206, 1.69312169, -0.03968254])
It is convenient to use `poly1d` objects for dealing with polynomials:
>>> p = np.poly1d(z)
>>> p(0.5)
0.6143849206349179
>>> p(3.5)
-0.34732142857143039
>>> p(10)
22.579365079365115
High-order polynomials may oscillate wildly:
>>> p30 = np.poly1d(np.polyfit(x, y, 30))
/... RankWarning: Polyfit may be poorly conditioned...
>>> p30(4)
-0.80000000000000204
>>> p30(5)
-0.99999999999999445
>>> p30(4.5)
-0.10547061179440398
Illustration:
>>> import matplotlib.pyplot as plt
>>> xp = np.linspace(-2, 6, 100)
>>> _ = plt.plot(x, y, '.', xp, p(xp), '-', xp, p30(xp), '--')
>>> plt.ylim(-2,2)
(-2, 2)
>>> plt.show()
"""
order = int(deg) + 1
x = NX.asarray(x) + 0.0
y = NX.asarray(y) + 0.0
# check arguments.
if deg < 0:
raise ValueError("expected deg >= 0")
if x.ndim != 1:
raise TypeError("expected 1D vector for x")
if x.size == 0:
raise TypeError("expected non-empty vector for x")
if y.ndim < 1 or y.ndim > 2:
raise TypeError("expected 1D or 2D array for y")
if x.shape[0] != y.shape[0]:
raise TypeError("expected x and y to have same length")
# set rcond
if rcond is None:
rcond = len(x)*finfo(x.dtype).eps
# set up least squares equation for powers of x
lhs = vander(x, order)
rhs = y
# apply weighting
if w is not None:
w = NX.asarray(w) + 0.0
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
lhs *= w[:, NX.newaxis]
if rhs.ndim == 2:
rhs *= w[:, NX.newaxis]
else:
rhs *= w
# scale lhs to improve condition number and solve
scale = NX.sqrt((lhs*lhs).sum(axis=0))
lhs /= scale
c, resids, rank, s = lstsq(lhs, rhs, rcond)
c = (c.T/scale).T # broadcast scale coefficients
# warn on rank reduction, which indicates an ill conditioned matrix
if rank != order and not full:
msg = "Polyfit may be poorly conditioned"
warnings.warn(msg, RankWarning, stacklevel=2)
if full:
return c, resids, rank, s, rcond
elif cov:
Vbase = inv(dot(lhs.T, lhs))
Vbase /= NX.outer(scale, scale)
# Some literature ignores the extra -2.0 factor in the denominator, but
# it is included here because the covariance of Multivariate Student-T
# (which is implied by a Bayesian uncertainty analysis) includes it.
# Plus, it gives a slightly more conservative estimate of uncertainty.
if len(x) <= order + 2:
raise ValueError("the number of data points must exceed order + 2 "
"for Bayesian estimate the covariance matrix")
fac = resids / (len(x) - order - 2.0)
if y.ndim == 1:
return c, Vbase * fac
else:
return c, Vbase[:,:, NX.newaxis] * fac
else:
return c
def polyval(p, x):
"""
Evaluate a polynomial at specific values.
If `p` is of length N, this function returns the value:
``p[0]*x**(N-1) + p[1]*x**(N-2) + ... + p[N-2]*x + p[N-1]``
If `x` is a sequence, then `p(x)` is returned for each element of `x`.
If `x` is another polynomial then the composite polynomial `p(x(t))`
is returned.
Parameters
----------
p : array_like or poly1d object
1D array of polynomial coefficients (including coefficients equal
to zero) from highest degree to the constant term, or an
instance of poly1d.
x : array_like or poly1d object
A number, an array of numbers, or an instance of poly1d, at
which to evaluate `p`.
Returns
-------
values : ndarray or poly1d
If `x` is a poly1d instance, the result is the composition of the two
polynomials, i.e., `x` is "substituted" in `p` and the simplified
result is returned. In addition, the type of `x` - array_like or
poly1d - governs the type of the output: `x` array_like => `values`
array_like, `x` a poly1d object => `values` is also.
See Also
--------
poly1d: A polynomial class.
Notes
-----
Horner's scheme [1]_ is used to evaluate the polynomial. Even so,
for polynomials of high degree the values may be inaccurate due to
rounding errors. Use carefully.
References
----------
.. [1] I. N. Bronshtein, K. A. Semendyayev, and K. A. Hirsch (Eng.
trans. Ed.), *Handbook of Mathematics*, New York, Van Nostrand
Reinhold Co., 1985, pg. 720.
Examples
--------
>>> np.polyval([3,0,1], 5) # 3 * 5**2 + 0 * 5**1 + 1
76
>>> np.polyval([3,0,1], np.poly1d(5))
poly1d([ 76.])
>>> np.polyval(np.poly1d([3,0,1]), 5)
76
>>> np.polyval(np.poly1d([3,0,1]), np.poly1d(5))
poly1d([ 76.])
"""
p = NX.asarray(p)
if isinstance(x, poly1d):
y = 0
else:
x = NX.asarray(x)
y = NX.zeros_like(x)
for i in range(len(p)):
y = y * x + p[i]
return y
def polyadd(a1, a2):
"""
Find the sum of two polynomials.
Returns the polynomial resulting from the sum of two input polynomials.
Each input must be either a poly1d object or a 1D sequence of polynomial
coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The sum of the inputs. If either input is a poly1d object, then the
output is also a poly1d object. Otherwise, it is a 1D array of
polynomial coefficients from highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub, polyval
Examples
--------
>>> np.polyadd([1, 2], [9, 5, 4])
array([9, 6, 6])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2])
>>> p2 = np.poly1d([9, 5, 4])
>>> print(p1)
1 x + 2
>>> print(p2)
2
9 x + 5 x + 4
>>> print(np.polyadd(p1, p2))
2
9 x + 6 x + 6
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 + a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) + a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 + NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polysub(a1, a2):
"""
Difference (subtraction) of two polynomials.
Given two polynomials `a1` and `a2`, returns ``a1 - a2``.
`a1` and `a2` can be either array_like sequences of the polynomials'
coefficients (including coefficients equal to zero), or `poly1d` objects.
Parameters
----------
a1, a2 : array_like or poly1d
Minuend and subtrahend polynomials, respectively.
Returns
-------
out : ndarray or poly1d
Array or `poly1d` object of the difference polynomial's coefficients.
See Also
--------
polyval, polydiv, polymul, polyadd
Examples
--------
.. math:: (2 x^2 + 10 x - 2) - (3 x^2 + 10 x -4) = (-x^2 + 2)
>>> np.polysub([2, 10, -2], [3, 10, -4])
array([-1, 0, 2])
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1 = atleast_1d(a1)
a2 = atleast_1d(a2)
diff = len(a2) - len(a1)
if diff == 0:
val = a1 - a2
elif diff > 0:
zr = NX.zeros(diff, a1.dtype)
val = NX.concatenate((zr, a1)) - a2
else:
zr = NX.zeros(abs(diff), a2.dtype)
val = a1 - NX.concatenate((zr, a2))
if truepoly:
val = poly1d(val)
return val
def polymul(a1, a2):
"""
Find the product of two polynomials.
Finds the polynomial resulting from the multiplication of the two input
polynomials. Each input must be either a poly1d object or a 1D sequence
of polynomial coefficients, from highest to lowest degree.
Parameters
----------
a1, a2 : array_like or poly1d object
Input polynomials.
Returns
-------
out : ndarray or poly1d object
The polynomial resulting from the multiplication of the inputs. If
either inputs is a poly1d object, then the output is also a poly1d
object. Otherwise, it is a 1D array of polynomial coefficients from
highest to lowest degree.
See Also
--------
poly1d : A one-dimensional polynomial class.
poly, polyadd, polyder, polydiv, polyfit, polyint, polysub,
polyval
convolve : Array convolution. Same output as polymul, but has parameter
for overlap mode.
Examples
--------
>>> np.polymul([1, 2, 3], [9, 5, 1])
array([ 9, 23, 38, 17, 3])
Using poly1d objects:
>>> p1 = np.poly1d([1, 2, 3])
>>> p2 = np.poly1d([9, 5, 1])
>>> print(p1)
2
1 x + 2 x + 3
>>> print(p2)
2
9 x + 5 x + 1
>>> print(np.polymul(p1, p2))
4 3 2
9 x + 23 x + 38 x + 17 x + 3
"""
truepoly = (isinstance(a1, poly1d) or isinstance(a2, poly1d))
a1, a2 = poly1d(a1), poly1d(a2)
val = NX.convolve(a1, a2)
if truepoly:
val = poly1d(val)
return val
def polydiv(u, v):
"""
Returns the quotient and remainder of polynomial division.
The input arrays are the coefficients (including any coefficients
equal to zero) of the "numerator" (dividend) and "denominator"
(divisor) polynomials, respectively.
Parameters
----------
u : array_like or poly1d
Dividend polynomial's coefficients.
v : array_like or poly1d
Divisor polynomial's coefficients.
Returns
-------
q : ndarray
Coefficients, including those equal to zero, of the quotient.
r : ndarray
Coefficients, including those equal to zero, of the remainder.
See Also
--------
poly, polyadd, polyder, polydiv, polyfit, polyint, polymul, polysub,
polyval
Notes
-----
Both `u` and `v` must be 0-d or 1-d (ndim = 0 or 1), but `u.ndim` need
not equal `v.ndim`. In other words, all four possible combinations -
``u.ndim = v.ndim = 0``, ``u.ndim = v.ndim = 1``,
``u.ndim = 1, v.ndim = 0``, and ``u.ndim = 0, v.ndim = 1`` - work.
Examples
--------
.. math:: \\frac{3x^2 + 5x + 2}{2x + 1} = 1.5x + 1.75, remainder 0.25
>>> x = np.array([3.0, 5.0, 2.0])
>>> y = np.array([2.0, 1.0])
>>> np.polydiv(x, y)
(array([ 1.5 , 1.75]), array([ 0.25]))
"""
truepoly = (isinstance(u, poly1d) or isinstance(u, poly1d))
u = atleast_1d(u) + 0.0
v = atleast_1d(v) + 0.0
# w has the common type
w = u[0] + v[0]
m = len(u) - 1
n = len(v) - 1
scale = 1. / v[0]
q = NX.zeros((max(m - n + 1, 1),), w.dtype)
r = u.copy()
for k in range(0, m-n+1):
d = scale * r[k]
q[k] = d
r[k:k+n+1] -= d*v
while NX.allclose(r[0], 0, rtol=1e-14) and (r.shape[-1] > 1):
r = r[1:]
if truepoly:
return poly1d(q), poly1d(r)
return q, r
_poly_mat = re.compile(r"[*][*]([0-9]*)")
def _raise_power(astr, wrap=70):
n = 0
line1 = ''
line2 = ''
output = ' '
while True:
mat = _poly_mat.search(astr, n)
if mat is None:
break
span = mat.span()
power = mat.groups()[0]
partstr = astr[n:span[0]]
n = span[1]
toadd2 = partstr + ' '*(len(power)-1)
toadd1 = ' '*(len(partstr)-1) + power
if ((len(line2) + len(toadd2) > wrap) or
(len(line1) + len(toadd1) > wrap)):
output += line1 + "\n" + line2 + "\n "
line1 = toadd1
line2 = toadd2
else:
line2 += partstr + ' '*(len(power)-1)
line1 += ' '*(len(partstr)-1) + power
output += line1 + "\n" + line2
return output + astr[n:]
class poly1d(object):
"""
A one-dimensional polynomial class.
A convenience class, used to encapsulate "natural" operations on
polynomials so that said operations may take on their customary
form in code (see Examples).
Parameters
----------
c_or_r : array_like
The polynomial's coefficients, in decreasing powers, or if
the value of the second parameter is True, the polynomial's
roots (values where the polynomial evaluates to 0). For example,
``poly1d([1, 2, 3])`` returns an object that represents
:math:`x^2 + 2x + 3`, whereas ``poly1d([1, 2, 3], True)`` returns
one that represents :math:`(x-1)(x-2)(x-3) = x^3 - 6x^2 + 11x -6`.
r : bool, optional
If True, `c_or_r` specifies the polynomial's roots; the default
is False.
variable : str, optional
Changes the variable used when printing `p` from `x` to `variable`
(see Examples).
Examples
--------
Construct the polynomial :math:`x^2 + 2x + 3`:
>>> p = np.poly1d([1, 2, 3])
>>> print(np.poly1d(p))
2
1 x + 2 x + 3
Evaluate the polynomial at :math:`x = 0.5`:
>>> p(0.5)
4.25
Find the roots:
>>> p.r
array([-1.+1.41421356j, -1.-1.41421356j])
>>> p(p.r)
array([ -4.44089210e-16+0.j, -4.44089210e-16+0.j])
These numbers in the previous line represent (0, 0) to machine precision
Show the coefficients:
>>> p.c
array([1, 2, 3])
Display the order (the leading zero-coefficients are removed):
>>> p.order
2
Show the coefficient of the k-th power in the polynomial
(which is equivalent to ``p.c[-(i+1)]``):
>>> p[1]
2
Polynomials can be added, subtracted, multiplied, and divided
(returns quotient and remainder):
>>> p * p
poly1d([ 1, 4, 10, 12, 9])
>>> (p**3 + 4) / p
(poly1d([ 1., 4., 10., 12., 9.]), poly1d([ 4.]))
``asarray(p)`` gives the coefficient array, so polynomials can be
used in all functions that accept arrays:
>>> p**2 # square of polynomial
poly1d([ 1, 4, 10, 12, 9])
>>> np.square(p) # square of individual coefficients
array([1, 4, 9])
The variable used in the string representation of `p` can be modified,
using the `variable` parameter:
>>> p = np.poly1d([1,2,3], variable='z')
>>> print(p)
2
1 z + 2 z + 3
Construct a polynomial from its roots:
>>> np.poly1d([1, 2], True)
poly1d([ 1, -3, 2])
This is the same polynomial as obtained by:
>>> np.poly1d([1, -1]) * np.poly1d([1, -2])
poly1d([ 1, -3, 2])
"""
__hash__ = None
@property
def coeffs(self):
""" The polynomial coefficients """
return self._coeffs
@property
def variable(self):
""" The name of the polynomial variable """
return self._variable
# calculated attributes
@property
def order(self):
""" The order or degree of the polynomial """
return len(self._coeffs) - 1
@property
def roots(self):
""" The roots of the polynomial, where self(x) == 0 """
return roots(self._coeffs)
# alias attributes
r = roots
c = coef = coefficients = coeffs
o = order
def __init__(self, c_or_r, r=False, variable=None):
if isinstance(c_or_r, poly1d):
self._variable = c_or_r._variable
self._coeffs = c_or_r._coeffs
if set(c_or_r.__dict__) - set(self.__dict__):
msg = ("In the future extra properties will not be copied "
"across when constructing one poly1d from another")
warnings.warn(msg, FutureWarning, stacklevel=2)
self.__dict__.update(c_or_r.__dict__)
if variable is not None:
self._variable = variable
return
if r:
c_or_r = poly(c_or_r)
c_or_r = atleast_1d(c_or_r)
if c_or_r.ndim > 1:
raise ValueError("Polynomial must be 1d only.")
c_or_r = trim_zeros(c_or_r, trim='f')
if len(c_or_r) == 0:
c_or_r = NX.array([0.])
self._coeffs = c_or_r
if variable is None:
variable = 'x'
self._variable = variable
def __array__(self, t=None):
if t:
return NX.asarray(self.coeffs, t)
else:
return NX.asarray(self.coeffs)
def __repr__(self):
vals = repr(self.coeffs)
vals = vals[6:-1]
return "poly1d(%s)" % vals
def __len__(self):
return self.order
def __str__(self):
thestr = "0"
var = self.variable
# Remove leading zeros
coeffs = self.coeffs[NX.logical_or.accumulate(self.coeffs != 0)]
N = len(coeffs)-1
def fmt_float(q):
s = '%.4g' % q
if s.endswith('.0000'):
s = s[:-5]
return s
for k in range(len(coeffs)):
if not iscomplex(coeffs[k]):
coefstr = fmt_float(real(coeffs[k]))
elif real(coeffs[k]) == 0:
coefstr = '%sj' % fmt_float(imag(coeffs[k]))
else:
coefstr = '(%s + %sj)' % (fmt_float(real(coeffs[k])),
fmt_float(imag(coeffs[k])))
power = (N-k)
if power == 0:
if coefstr != '0':
newstr = '%s' % (coefstr,)
else:
if k == 0:
newstr = '0'
else:
newstr = ''
elif power == 1:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = var
else:
newstr = '%s %s' % (coefstr, var)
else:
if coefstr == '0':
newstr = ''
elif coefstr == 'b':
newstr = '%s**%d' % (var, power,)
else:
newstr = '%s %s**%d' % (coefstr, var, power)
if k > 0:
if newstr != '':
if newstr.startswith('-'):
thestr = "%s - %s" % (thestr, newstr[1:])
else:
thestr = "%s + %s" % (thestr, newstr)
else:
thestr = newstr
return _raise_power(thestr)
def __call__(self, val):
return polyval(self.coeffs, val)
def __neg__(self):
return poly1d(-self.coeffs)
def __pos__(self):
return self
def __mul__(self, other):
if isscalar(other):
return poly1d(self.coeffs * other)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __rmul__(self, other):
if isscalar(other):
return poly1d(other * self.coeffs)
else:
other = poly1d(other)
return poly1d(polymul(self.coeffs, other.coeffs))
def __add__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __radd__(self, other):
other = poly1d(other)
return poly1d(polyadd(self.coeffs, other.coeffs))
def __pow__(self, val):
if not isscalar(val) or int(val) != val or val < 0:
raise ValueError("Power to non-negative integers only.")
res = [1]
for _ in range(val):
res = polymul(self.coeffs, res)
return poly1d(res)
def __sub__(self, other):
other = poly1d(other)
return poly1d(polysub(self.coeffs, other.coeffs))
def __rsub__(self, other):
other = poly1d(other)
return poly1d(polysub(other.coeffs, self.coeffs))
def __div__(self, other):
if isscalar(other):
return poly1d(self.coeffs/other)
else:
other = poly1d(other)
return polydiv(self, other)
__truediv__ = __div__
def __rdiv__(self, other):
if isscalar(other):
return poly1d(other/self.coeffs)
else:
other = poly1d(other)
return polydiv(other, self)
__rtruediv__ = __rdiv__
def __eq__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
if self.coeffs.shape != other.coeffs.shape:
return False
return (self.coeffs == other.coeffs).all()
def __ne__(self, other):
if not isinstance(other, poly1d):
return NotImplemented
return not self.__eq__(other)
def __getitem__(self, val):
ind = self.order - val
if val > self.order:
return 0
if val < 0:
return 0
return self.coeffs[ind]
def __setitem__(self, key, val):
ind = self.order - key
if key < 0:
raise ValueError("Does not support negative powers.")
if key > self.order:
zr = NX.zeros(key-self.order, self.coeffs.dtype)
self._coeffs = NX.concatenate((zr, self.coeffs))
ind = 0
self._coeffs[ind] = val
return
def __iter__(self):
return iter(self.coeffs)
def integ(self, m=1, k=0):
"""
Return an antiderivative (indefinite integral) of this polynomial.
Refer to `polyint` for full documentation.
See Also
--------
polyint : equivalent function
"""
return poly1d(polyint(self.coeffs, m=m, k=k))
def deriv(self, m=1):
"""
Return a derivative of this polynomial.
Refer to `polyder` for full documentation.
See Also
--------
polyder : equivalent function
"""
return poly1d(polyder(self.coeffs, m=m))
# Stuff to do on module import
warnings.simplefilter('always', RankWarning)
|
{
"content_hash": "24bbd00149d7a4c902a55b94a5e3c595",
"timestamp": "",
"source": "github",
"line_count": 1293,
"max_line_length": 79,
"avg_line_length": 29.60170146945089,
"alnum_prop": 0.5593207054212933,
"repo_name": "ssanderson/numpy",
"id": "50e6d8db276bcdf925045aa01619e10c0620208e",
"size": "38275",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numpy/lib/polynomial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "7471702"
},
{
"name": "C++",
"bytes": "164638"
},
{
"name": "Fortran",
"bytes": "10042"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "6519851"
}
],
"symlink_target": ""
}
|
import logging
import traceback
import ujson as json
from collections import *
from itertools import chain
from bs4 import BeautifulSoup, SoupStrainer
from openvenues.extract.util import *
logger = logging.getLogger('extract.soup')
def tag_value_and_attr(tag):
value_attr = None
value_attr = property_values.get(tag.name.lower())
if value_attr and value_attr in tag.attrs:
value = tag.attrs[value_attr]
else:
value = tag.text.strip()
return value, value_attr
def extract_links(soup):
def not_nofollow(rel):
return rel != 'nofollow'
for tag in soup.find_all('a', attrs={'href': True,
'rel': not_nofollow}):
link = tag['href']
# Make link absolute
link = urlparse.urljoin(url, link)
yield link
def extract_basic_metadata(soup):
title_tags = soup.select('meta[property="og:title"]') + soup.select('meta[name="title"]') + soup.find_all('title')
title = None
for t in title_tags:
value, value_attr = tag_value_and_attr(t)
if value and value.strip():
title = value.strip()
break
ret = {}
if title:
ret['title'] = title
description_tags = soup.select('meta[property="og:description"]') or soup.select('meta[name="description"]')
if description_tags:
for d in description_tags:
value, value_attr = tag_value_and_attr(d)
if value and value.strip():
description = value.strip()
ret['description'] = description
break
canonical = soup.select('link[rel="canonical"]')
if canonical and canonical[0].get('href'):
ret['canonical'] = canonical[0]['href']
alternates = soup.select('link[rel="alternate"]')
if alternates:
ret['alternates'] = [{'link': tag['href'],
'lang': tag.get('hreflang')
} for tag in alternates if tag.get('href')]
meta_tags = set(soup.select('meta[property]')) | set(soup.select('meta[name]'))
meta_dict = defaultdict(list)
for t in meta_tags:
name = t.get('property', t.get('name', '')).strip().lower()
value, value_attr = tag_value_and_attr(t)
if value and value.strip() and not name.startswith('og:') and not name.startswith('place:') and not name.startswith('business:'):
meta_dict[name].append(value)
if meta_dict:
ret['other_meta'] = dict(meta_dict)
rel_tag = soup.select('[rel="tag"]')
if rel_tag:
all_tags = []
for t in rel_tag:
tag = {}
value = t.text.strip()
if value:
tag['value'] = value
link, link_attr = tag_value_and_attr(t)
if link_attr and value:
tag['link'] = link_attr
tag['link_value'] = link
elif link_attr:
tag['value'] = link
tag['attr'] = link_attr
else:
continue
all_tags.append(tag)
ret['tags'] = all_tags
return ret
def extract_schema_dot_org(soup, use_rdfa=False):
items = []
scope_attr = 'itemtype'
prop_attr = 'itemprop'
schema_type = SCHEMA_DOT_ORG_TYPE if not use_rdfa else RDFA_TYPE
xmlns = None
if use_rdfa:
data_vocabulary = None
# Verify that we have xmlns defined
for tag in soup.find_all(True):
data_vocabulary = [k for k, v in tag.attrs.iteritems()
if k.startswith('xmlns:') and 'data-vocabulary' in v]
if data_vocabulary:
data_vocabulary = data_vocabulary[0]
break
if not data_vocabulary:
return items
else:
xmlns = data_vocabulary.split(':', 1)[-1]
queue = deque([(None, tag) for tag in soup.find_all(True, recursive=False)])
have_street = False
have_latlon = False
while queue:
parent_item, tag = queue.popleft()
if not tag.name:
continue
current_item = parent_item
item = None
prop = None
has_vocab = False
item_scope = tag.get(scope_attr)
if not item_scope and use_rdfa:
item_scope = tag.get('typeof', tag.get('vocab'))
if not item_scope or not item_scope.startswith('{}:'.format(xmlns)):
item_scope = None
item_prop = tag.get(prop_attr)
item_type = item_scope
if not item_prop and use_rdfa:
item_prop = tag.get('property')
if not item_prop or not item_prop.startswith('{}:'.format(xmlns)):
item_prop = tag.get('rel', [])
item_prop = [p for p in item_prop if p.startswith('{}:'.format(xmlns))]
if not item_prop:
item_prop = None
else:
item_prop = item_prop[0]
if item_prop:
prop_name = item_prop
if use_rdfa:
prop_name = prop_name.split(':', 1)[-1]
prop_name = prop_name.replace('-', '_')
prop = {'name': prop_name}
value_attr = None
if not item_scope:
value, value_attr = tag_value_and_attr(tag)
if use_rdfa and not value and tag.get('content'):
value, value_attr = tag['content'], 'content'
prop['value'] = value
attributes = {k: v for k, v in tag.attrs.iteritems() if k not in (scope_attr, prop_attr)}
if value_attr:
prop['text'] = tag.text.strip()
prop['value_attr'] = value_attr
if attributes:
prop['attributes'] = attributes
if current_item is not None:
current_item['properties'] = current_item['properties'] or []
current_item['properties'].append(prop)
if item_scope:
if prop is not None:
item = prop
else:
item = {}
is_place_item = False
if item_type:
if not use_rdfa:
item_type = item_type.split('/')[-1]
elif use_rdfa and xmlns and item_type.startswith('{}:'.format(xmlns)):
item_type = item_type.split(':', 1)[-1]
is_place_item = item_type.lower() in PLACE_SCHEMA_TYPES
item.update({
'item_type': schema_type,
'type': item_type,
})
item['properties'] = []
if is_place_item:
items.append(item)
current_item = item
queue.extend([(current_item, child) for child in tag.find_all(True, recursive=False)])
ret = []
for item in items:
have_street = False
have_latlon = False
item_type = item.get('item_type')
if item_type == 'schema.org':
for prop in item.get('properties', []):
name = prop.get('name', '').lower()
if name == 'address':
props = set([p.get('name', '').lower() for p in prop.get('properties', [])])
if props & street_props:
have_street = True
if len(latlon_props & props) >= 2:
have_latlon = True
if name == 'geo':
props = set([p.get('name') for p in prop.get('properties', [])])
if len(latlon_props & props) >= 2:
have_latlon = True
if name in latlon_props:
have_latlon = True
if name in street_props:
have_street = True
elif item_type == 'rdfa':
props = set([p.get('name', '').lower() for p in item.get('properties', [])])
have_street = props & street_props
have_latlon = len(props & latlon_props) >= 2
if have_street or have_latlon:
ret.append(item)
return ret
FACEBOOK = 'facebook'
TWITTER = 'twitter'
INSTAGRAM = 'instagram'
PINTEREST = 'pinterest'
YELP = 'yelp'
FOURSQUARE = 'foursquare'
GOOGLE_PLUS = 'google_plus'
YOUTUBE = 'youtube'
VIMEO = 'vimeo'
social_href_patterns = {
'facebook.com': FACEBOOK,
'twitter.com': TWITTER,
'instagram.com': INSTAGRAM,
'pinterest.com': PINTEREST,
'yelp.': YELP,
'foursquare': FOURSQUARE,
'plus.google': GOOGLE_PLUS,
'youtube': YOUTUBE,
'youtu.be': YOUTUBE,
'vimeo.com': VIMEO,
}
def extract_social_handles(soup):
max_matches = 0
ids = defaultdict(list)
for pattern, site in social_href_patterns.iteritems():
matches = soup.select(u'a[href*="{}"]'.format(pattern))
if len(matches) > max_matches:
max_matches = len(matches)
for m in matches:
value, value_attr = tag_value_and_attr(m)
ids[site].append(value)
return dict(ids)
value_attr_regex = re.compile("value-.*")
def extract_vcards(soup):
items = []
def gen_prop(name, selector):
prop = None
if selector:
result = selector[0]
prop = {'name': name}
val_select = result.select('.value')
if val_select:
value, value_attr = tag_value_and_attr(val_select[0])
else:
val_select = result.find_all(class_=value_attr_regex)
if not val_select:
value, value_attr = tag_value_and_attr(result)
else:
value_attr = val_select[0].attrs['class'][0].split('-', 1)[-1]
value = val_select[0].attrs.get(value_attr)
if not value:
value, value_attr = tag_value_and_attr(result)
text = (result.text or u'').strip()
if not value_attr:
prop['value'] = text
else:
prop['text'] = text
prop['value'] = value
prop['value_attr'] = value_attr
attributes = {k: v for k, v in result.attrs.iteritems() if k not in ('class', value_attr)}
if attributes:
prop['attributes'] = attributes
if 'text' not in prop and 'value' not in prop and 'attributes' not in prop:
prop = None
return prop
vcards = soup.select('.vcard')
if not vcards:
vcards = soup.select('.adr')
for vcard in vcards:
item = {}
properties = []
have_address = False
street = gen_prop('street_address', vcard.select('.street-address'))
if street:
properties.append(street)
have_address = True
locality = gen_prop('locality', vcard.select('.locality'))
if locality:
properties.append(locality)
region = gen_prop('region', vcard.select('.region'))
if region:
properties.append(region)
postal_code = gen_prop('postal_code', vcard.select('.postal-code'))
if postal_code:
properties.append(postal_code)
country = gen_prop('country', vcard.select('.country-name'))
if country:
properties.append(country)
have_latlon = False
latitude = gen_prop('latitude', vcard.select('.latitude'))
longitude = gen_prop('longitude', vcard.select('.longitude'))
if not latitude and longitude:
latitude = gen_prop('latitude', vcard.select('.p-latitude'))
longtitude = gen_prop('longitude', vcard.select('.p-longitude'))
if latitude and longitude:
properties.append(latitude)
properties.append(longitude)
have_latlon = True
if have_address or have_latlon:
org_name = gen_prop('org_name', vcard.select('.org'))
if org_name:
properties.append(org_name)
name = gen_prop('name', vcard.select('.fn'))
if name:
properties.append(name)
photo = gen_prop('photo', vcard.select('.photo'))
if photo:
properties.append(photo)
vcard_url = gen_prop('url', vcard.select('.url a'))
if not vcard_url:
vcard_url = gen_prop('url', vcard.select('a.url'))
if vcard_url:
properties.append(vcard_url)
telephone = gen_prop('telephone', vcard.select('.tel'))
if telephone:
properties.append(telephone)
category = gen_prop('category', vcard.select('.category'))
if category:
properties.append(category)
else:
continue
if properties:
item['item_type'] = VCARD_TYPE
item['properties'] = properties
items.append(item)
return items
def extract_address_elements(soup):
items = []
for addr in soup.select('address'):
html = unicode(addr)
items.append({'item_type': ADDRESS_ELEMENT_TYPE, 'address': BeautifulSoup(html).text.strip(),
'original_html': html})
return items
def extract_geotags(soup):
placename = soup.select('meta[name="geo.placename"]')
position = soup.select('meta[name="geo.position"]')
region = soup.select('meta[name="geo.region"]')
icbm = soup.select('meta[name="ICBM"]')
title = soup.select('meta[name="DC.title"]')
item = {}
if position:
position = position[0]
value, value_attr = tag_value_and_attr(position)
if value and value.strip():
item['geotags.position'] = value.strip()
if not position and icbm:
icbm = icbm[0]
value, value_attr = tag_value_and_attr(icbm)
if value and value.strip():
item['geotags.icbm'] = value.strip()
if placename:
placename = placename[0]
value, value_attr = tag_value_and_attr(placename)
if value:
item['geotags.placename'] = value.strip()
if region:
region = region[0]
value, value_attr = tag_value_and_attr(region)
if value:
item['geotags.region'] = value.strip()
if title:
title = title[0]
value, value_attr = tag_value_and_attr(title)
if value:
item['geotags.title'] = value.strip()
if item:
item['item_type'] = GEOTAG_TYPE
return item or None
def extract_opengraph_tags(soup):
og_attrs = {}
for el in soup.select('meta[property]'):
name = el['property'].strip().lower()
value = el.get('content', '').strip()
if name.startswith('og:') and value and name not in og_attrs:
og_attrs[name] = value
return og_attrs or None
def extract_opengraph_business_tags(soup):
og_attrs = {}
for el in soup.select('meta[property]'):
name = el['property'].strip().lower()
value = el.get('content', '').strip()
if (name.startswith('business:') or name.startswith('place:')) and value and name not in og_attrs:
og_attrs[name] = value
return og_attrs or None
def gen_og_props(og_tags, proplist, prefix='og'):
props = {}
for prop in proplist:
og_tag_name = '{}:{}'.format(prefix, prop)
value = og_tags.get(og_tag_name, '').strip()
if value:
props[og_tag_name] = value
return props
def opengraph_item(og_tags):
latitude_value = None
for val in ('og:latitude', 'og:lat'):
if val in og_tags:
latitude_value = val
longitude_value = None
for val in ('og:longitude', 'og:lng'):
if val in og_tags:
longitude_value = val
have_latlon = latitude_value and longitude_value
item = {}
if have_latlon:
try:
latitude = og_tags[latitude_value].strip()
longitude = og_tags[longitude_value].strip()
except Exception:
logger.error('Error in opengraph tags extracting lat/lon: {}'.format(traceback.format_exc()))
if latitude and longitude:
item['og:latitude'] = latitude
item['og:longitude'] = longitude
address_props = gen_og_props(og_tags, ['street-address', 'locality', 'region', 'postal-code', 'country-name', 'phone_number'])
have_address = len(address_props) > 0
if have_address:
item.update(address_props)
if have_address or have_latlon:
item['item_type'] = OG_TAG_TYPE
title_props = gen_og_props(og_tags, ['title', 'description', 'locale', 'site_name', 'type', 'url'])
item.update(title_props)
return item or None
def opengraph_business(og_tags):
item = {}
address_props = gen_og_props(og_tags, ['street_address', 'locality', 'region', 'postal_code',
'country', 'phone_number', 'website'], prefix='business:contact_data')
have_address = len(address_props) > 0
if have_address:
item.update(address_props)
latitude = og_tags.get('place:location:latitude', '').strip()
longitude = og_tags.get('place:location:longitude', '').strip()
have_latlon = latitude and longitude
if have_latlon:
item['place:location:latitude'] = latitude
item['place:location:longitude'] = longitude
if have_address or have_latlon:
item['item_type'] = OG_BUSINESS_TAG_TYPE
title_props = gen_og_props(og_tags, ['title', 'description', 'locale', 'site_name', 'type', 'url'])
item.update(title_props)
return item or None
google_maps_lat_lon_path_regex = re.compile('/maps.*?@[\d]+', re.I)
def item_from_google_maps_url(url):
query_param = 'q'
ll_param_names = ('ll', 'sll', 'center')
ll_param = 'll'
alt_ll_param = 'sll'
near_param_names = ('hnear', 'near')
daddr_param = 'daddr'
latitude = None
longitude = None
split = urlparse.urlsplit(url)
query_string = split.query
path = split.path
if query_string:
params = {k.lower(): v for k, v in urlparse.parse_qs(query_string).iteritems()}
for param in ll_param_names:
latlon = params.get(param)
try:
latitude, longitude = latlon_comma_splitter.split(latlon[0])
if not latitude and longitude:
continue
except Exception:
continue
query = params.get(query_param)
if query:
query = query[0]
for param in near_param_names:
near = params.get(param)
if near:
near = near[0]
break
daddr = params.get(daddr_param)
if daddr:
daddr = daddr[0]
item = {}
if latitude and longitude:
item['latitude'] = latitude
item['longitude'] = longitude
if query:
item['googlemaps.query'] = query
if near:
item['googlemaps.near'] = near
if daddr:
item['googlemaps.daddr'] = daddr
if item:
item['googlemaps.url'] = url
item['item_type'] = GOOGLE_MAP_EMBED_TYPE
return item
if path and google_maps_lat_lon_path_regex.search(path):
path_components = path.split('/')
for p in path_components:
if p.startswith('@'):
values = p.strip('@').split(',')
if len(values) >= 2:
latitude, longitude = values[:2]
if latitude and longitude:
item = {
'item_type': GOOGLE_MAP_EMBED_TYPE,
'latitude': latitude,
'longitude': longitude,
}
return item
return None
google_maps_href_regex = re.compile('google\.[^/]+\/maps', re.I)
google_maps_embed_regex = re.compile('google\.[^/]+\/maps/embed/.*/place', re.I)
def extract_google_map_embeds(soup):
items = []
iframe = soup.select('iframe[src*="maps.google"]')
if not iframe:
iframe = soup.find_all('iframe', src=google_maps_embed_regex)
seen = set()
if iframe:
for f in iframe:
u = f.get('src')
if u not in seen:
item = item_from_google_maps_url(u)
if item:
items.append(item)
seen.add(u)
a_tag = soup.select('a[href*="maps.google"]')
if not a_tag:
a_tag = soup.find_all('a', href=google_maps_href_regex)
if a_tag:
for a in a_tag:
u = a.get('href')
if u not in seen:
item = item_from_google_maps_url(u)
if item:
items.append(item)
seen.add(u)
static_maps = soup.select('img[src*="maps.google"]')
if static_maps:
for img in static_maps:
u = img.get('src')
if u not in seen:
item = item_from_google_maps_url(u)
if item:
items.append(item)
seen.add(u)
shortener_a_tag = soup.select('a[href*="goo.gl/maps"]')
if shortener_a_tag:
for a in a_tag:
u = a.get('href')
if u not in seen:
text = (a.text or '').strip()
item = {
'item_type': GOOGLE_MAP_SHORTENED,
'url': u,
}
if text:
item['anchor'] = text
items.append(item)
seen.add(u)
return items
def extract_data_lat_lon_attributes(soup):
lat = soup.find_all(attrs={'data-lat': True})
items = []
for tag in lat:
latitude = tag['data-lat'].strip()
longitude = tag.get('data-lng', tag.get('data-lon', tag.get('data-long', None)))
if latitude and longitude:
items.append({'item_type': DATA_LATLON_TYPE,
'latitude': latitude,
'longitude': longitude,
'attrs': tag.attrs
})
return items
hopstop_route_regex = re.compile('hopstop\.[^/]+/route')
hopstop_map_regex = re.compile('hopstop\.[^/]+/map')
def extract_hopstop_direction_embeds(soup):
hopstop_embeds = soup.find_all('a', href=hopstop_route_regex)
items = []
for tag in hopstop_embeds:
split = urlparse.urlsplit(tag.attrs['href'])
query_string = split.query
if query_string:
params = urlparse.parse_qs(query_string)
if params and 'address2' in params and 'zip2' in params:
item = {'item_type': HOPSTOP_ROUTE_TYPE,
'address': params['address2'][0],
'postal_code': params['zip2'][0]
}
items.append(item)
return items
def extract_hopstop_map_embeds(soup):
hopstop_embeds = soup.find_all('a', href=hopstop_map_regex)
items = []
for tag in hopstop_embeds:
split = urlparse.urlsplit(tag.attrs['href'])
query_string = split.query
if query_string:
params = urlparse.parse_qs(query_string)
if params and 'address' in params:
item = {'item_type': HOPSTOP_MAP_TYPE,
'address': params['address'][0]}
items.append(item)
return items
# Some big sites like yellowpages.com use this
def extract_mappoint_embeds(soup):
pushpins = soup.find_all(attrs={'data-pushpin': True})
items = []
if len(pushpins) == 1:
try:
item = json.loads(pushpins[0]['data-pushpin'])
latitude = item.get('lat', item.get('latitude'))
longitude = item.get('lon', item.get('long', item.get('longitude')))
if latitude and longitude:
return [{'item_type': MAPPOINT_EMBED_TYPE,
'mappoint.latitude': latitude,
'mappoint.longitude': longitude}]
except Exception:
logger.error('Error in extracting mappoint embed: {}'.format(traceback.format_exc()))
return []
def extract_items(soup):
items = []
schema_dot_org_items = extract_schema_dot_org(soup)
rdfa_items = extract_schema_dot_org(soup, use_rdfa=True)
vcards = extract_vcards(soup)
address_elements = extract_address_elements(soup)
opengraph_tags = extract_opengraph_tags(soup)
opengraph_business_tags = extract_opengraph_business_tags(soup)
google_maps_embeds = extract_google_map_embeds(soup)
geotags = extract_geotags(soup)
mappoint_pushpins = extract_mappoint_embeds(soup)
hopstop_route_embeds = extract_hopstop_direction_embeds(soup)
hopstop_map_embeds = extract_hopstop_map_embeds(soup)
data_latlon_attrs = extract_data_lat_lon_attributes(soup)
if geotags:
geotags = [geotags]
basic_metadata = extract_basic_metadata(soup)
items = list(chain(*(c for c in (schema_dot_org_items,
rdfa_items,
vcards,
address_elements,
geotags,
google_maps_embeds,
mappoint_pushpins,
hopstop_route_embeds,
hopstop_map_embeds,
data_latlon_attrs) if c)))
if opengraph_tags:
i = opengraph_item(opengraph_tags)
if i:
items.append(i)
if opengraph_business_tags:
i = opengraph_business(opengraph_business_tags)
if i:
items.append(i)
social_handles = extract_social_handles(soup)
ret = {}
if items:
ret['items'] = items
if social_handles:
ret['social'] = social_handles
if opengraph_tags:
ret['og'] = opengraph_tags
if basic_metadata:
ret.update(basic_metadata)
return ret
|
{
"content_hash": "fea4c1060795db44414ab88e249a2bc8",
"timestamp": "",
"source": "github",
"line_count": 822,
"max_line_length": 137,
"avg_line_length": 31.81508515815085,
"alnum_prop": 0.5307815845824411,
"repo_name": "openvenues/openvenues",
"id": "b3b04d2588de923e1cfa007a7b5cbac634d26455",
"size": "26152",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "openvenues/extract/soup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "812335"
},
{
"name": "Python",
"bytes": "76608"
}
],
"symlink_target": ""
}
|
from operator import itemgetter
import re
from django import template
from django.template.defaultfilters import stringfilter
from api.models import LocationMention, Location, Sentence
register = template.Library()
@register.filter
@stringfilter
def wrap_with_span(string, arg):
""" Wraps all instances of a string with a span element"""
words = arg.split(' ')
for word in words:
if word[-1].lower() == 's':
word = word[:-1]
pattern = re.compile(r'\b({0}[\w\d]*)\b'.format(word), flags=re.I)
for (match) in re.findall(pattern, string):
string = re.sub(r'{0}'.format(match),
'<span>{0}</span>'.format(match), string)
break;
return string.replace('&#x', '&#x')
@register.filter
@stringfilter
def add_locations(string, args):
""" Adds location links to a snippet string """
string = string.replace(' ', ' ')
locs = args[0]
text = args[1]
for loc in locs:
pattern = re.compile(r'(\b)({0})(\b)'.format(loc['location']), flags=re.I)
for (start, match, end) in re.findall(pattern, string):
string = re.sub(r'{0}'.format(match),
'<a href="/search/?loc={0}&text={1}">{2}</a>'.format(loc['locid'], text, match), string)
break;
return string
@register.tag
def make_list(parser, token):
bits = list(token.split_contents())
if len(bits) >= 4 and bits[-2] == "as":
varname = bits[-1]
items = bits[1:-2]
return MakeListNode(items, varname)
else:
raise template.TemplateSyntaxError("%r expected format is 'item [item ...] as varname'" % bits[0])
class MakeListNode(template.Node):
def __init__(self, items, varname):
self.items = map(template.Variable, items)
self.varname = varname
def render(self, context):
context[self.varname] = [ i.resolve(context) for i in self.items ]
return ""
|
{
"content_hash": "a6d7ba65545dbd91117a483cf25f6291",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 104,
"avg_line_length": 30.677419354838708,
"alnum_prop": 0.6146161934805467,
"repo_name": "LitPalimpsest/parser-api-search",
"id": "cc68f3da049eb4c6a3e0c419eb76fe47f73983e0",
"size": "1902",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "site/api/templatetags/api_extras.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23475"
},
{
"name": "HTML",
"bytes": "21757"
},
{
"name": "Python",
"bytes": "38272"
},
{
"name": "Shell",
"bytes": "1289"
}
],
"symlink_target": ""
}
|
import sys
#
# IDL Node
#
# IDL Node defines the IDLAttribute and IDLNode objects which are constructed
# by the parser as it processes the various 'productions'. The IDLAttribute
# objects are assigned to the IDLNode's property dictionary instead of being
# applied as children of The IDLNodes, so they do not exist in the final tree.
# The AST of IDLNodes is the output from the parsing state and will be used
# as the source data by the various generators.
#
#
# CopyToList
#
# Takes an input item, list, or None, and returns a new list of that set.
def CopyToList(item):
# If the item is 'Empty' make it an empty list
if not item:
item = []
# If the item is not a list
if type(item) is not type([]):
item = [item]
# Make a copy we can modify
return list(item)
# IDLSearch
#
# A temporary object used by the parsing process to hold an Extended Attribute
# which will be passed as a child to a standard IDLNode.
#
class IDLSearch(object):
def __init__(self):
self.depth = 0
def Enter(self, node):
pass
def Exit(self, node):
pass
# IDLAttribute
#
# A temporary object used by the parsing process to hold an Extended Attribute
# which will be passed as a child to a standard IDLNode.
#
class IDLAttribute(object):
def __init__(self, name, value):
self._cls = 'Property'
self.name = name
self.value = value
def __str__(self):
return '%s=%s' % (self.name, self.value)
def GetClass(self):
return self._cls
#
# IDLNode
#
# This class implements the AST tree, providing the associations between
# parents and children. It also contains a namepsace and propertynode to
# allow for look-ups. IDLNode is derived from IDLRelease, so it is
# version aware.
#
class IDLNode(object):
VERBOSE_PROPS = [
'PROD', 'NAME', 'VALUE', 'TYPE',
'ERRORS', 'WARNINGS', 'FILENAME', 'LINENO', 'POSITION', 'DATETIME',
]
def __init__(self, cls, filename, lineno, pos, children=None):
self._cls = cls
self._properties = {
'ERRORS' : [],
'WARNINGS': [],
'FILENAME': filename,
'LINENO' : lineno,
'POSITION' : pos,
}
self._children = []
self._parent = None
self.AddChildren(children)
#
#
#
# Return a string representation of this node
def __str__(self):
name = self.GetProperty('NAME','')
value = self.GetProperty('VALUE')
if value or value == '':
return '%s(%s) = "%s"' % (self._cls, name, value)
return '%s(%s)' % (self._cls, name)
def GetLogLine(self, msg):
filename, lineno = self.GetFileAndLine()
return '%s(%d) : %s\n' % (filename, lineno, msg)
# Log an error for this object
def Error(self, msg):
self.GetProperty('ERRORS').append(msg)
sys.stderr.write(self.GetLogLine('error: ' + msg))
# Log a warning for this object
def Warning(self, msg):
self.GetProperty('WARNINGS').append(msg)
sys.stdout.write(self.GetLogLine('warning:' + msg))
# Return file and line number for where node was defined
def GetFileAndLine(self):
return self.GetProperty('FILENAME'), self.GetProperty('LINENO')
def GetClass(self):
return self._cls
def GetName(self):
return self.GetProperty('NAME')
def GetParent(self):
return self._parent
def Traverse(self, search, filter_nodes):
if self._cls in filter_nodes:
return ''
search.Enter(self)
search.depth += 1
for child in self._children:
child.Traverse(search, filter_nodes)
search.depth -= 1
search.Exit(self)
def Tree(self, filter_nodes=None, suppress_props=VERBOSE_PROPS):
class DumpTreeSearch(IDLSearch):
def __init__(self, props):
IDLSearch.__init__(self)
self.out = []
self.props = props or []
def Enter(self, node):
tab = ''.rjust(self.depth * 2)
self.out.append(tab + str(node))
proplist = []
for key, value in node.GetProperties().items():
if key not in self.props:
proplist.append(tab + ' %s: %s' % (key, str(value)))
if proplist:
self.out.extend(proplist)
if filter_nodes == None:
filter_nodes = ['SpecialComment']
search = DumpTreeSearch(suppress_props)
self.Traverse(search, filter_nodes)
return search.out
#
# Search related functions
#
# Check if node is of a given type
def IsA(self, *typelist):
if self._cls in typelist:
return True
return False
# Get a list of all children
def GetChildren(self):
return self._children
def GetListOf(self, *keys):
out = []
for child in self.GetChildren():
if child.GetClass() in keys:
out.append(child)
return out
def GetOneOf(self, *keys):
out = self.GetListOf(*keys)
if out:
return out[0]
return None
def AddChildren(self, children):
children = CopyToList(children)
for child in children:
if not child:
continue
if type(child) == IDLAttribute:
self.SetProperty(child.name, child.value)
continue
if type(child) == IDLNode:
child._parent = self
self._children.append(child)
continue
raise RuntimeError('Adding child of type %s.\n' % type(child).__name__)
#
# Property Functions
#
def SetProperty(self, name, val):
self._properties[name] = val
def GetProperty(self, name, default=None):
return self._properties.get(name, default)
def GetProperties(self):
return self._properties
|
{
"content_hash": "ff812a2587211f7ba652cf448e480b75",
"timestamp": "",
"source": "github",
"line_count": 221,
"max_line_length": 78,
"avg_line_length": 24.615384615384617,
"alnum_prop": 0.6419117647058824,
"repo_name": "ric2b/Vivaldi-browser",
"id": "d7284d8994c54fb33faa2542447f8a2d4e0f4c74",
"size": "5630",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chromium/tools/idl_parser/idl_node.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
livejs = '''
/*
Live.js - One script closer to Designing in the Browser
Written for Handcraft.com by Martin Kool (@mrtnkl).
Version 4.
Recent change: Made stylesheet and mimetype checks case insensitive.
http://livejs.com
http://livejs.com/license (MIT)
@livejs
Include live.js#css to monitor css changes only.
Include live.js#js to monitor js changes only.
Include live.js#html to monitor html changes only.
Mix and match to monitor a preferred combination such as live.js#html,css
By default, just include live.js to monitor all css, js and html changes.
Live.js can also be loaded as a bookmarklet. It is best to only use it for CSS then,
as a page reload due to a change in html or css would not re-include the bookmarklet.
To monitor CSS and be notified that it has loaded, include it as: live.js#css,notify
*/
(function () {
var headers = { "Etag": 1, "Last-Modified": 1, "Content-Length": 1, "Content-Type": 1 },
resources = {},
pendingRequests = {},
currentLinkElements = {},
oldLinkElements = {},
interval = 1000,
loaded = false,
active = { "html": 1, "css": 1, "js": 1 };
var Live = {
// performs a cycle per interval
heartbeat: function () {
if (document.body) {
// make sure all resources are loaded on first activation
if (!loaded) Live.loadresources();
Live.checkForChanges();
}
setTimeout(Live.heartbeat, interval);
},
// loads all local css and js resources upon first activation
loadresources: function () {
// helper method to assert if a given url is local
function isLocal(url) {
var loc = document.location,
reg = new RegExp("^\\.|^\/(?!\/)|^[\\w]((?!://).)*$|" + loc.protocol + "//" + loc.host);
return url.match(reg);
}
// gather all resources
var scripts = document.getElementsByTagName("script"),
links = document.getElementsByTagName("link"),
uris = [];
// track local js urls
for (var i = 0; i < scripts.length; i++) {
var script = scripts[i], src = script.getAttribute("src");
if (src && isLocal(src))
uris.push(src);
if (src && src.match(/\blive.js#/)) {
for (var type in active)
active[type] = src.match("[#,|]" + type) != null
if (src.match("notify"))
alert("Live.js is loaded.");
}
}
if (!active.js) uris = [];
if (active.html) uris.push(document.location.href);
// track local css urls
for (var i = 0; i < links.length && active.css; i++) {
var link = links[i], rel = link.getAttribute("rel"), href = link.getAttribute("href", 2);
if (href && rel && rel.match(new RegExp("stylesheet", "i")) && isLocal(href)) {
uris.push(href);
currentLinkElements[href] = link;
}
}
// initialize the resources info
for (var i = 0; i < uris.length; i++) {
var url = uris[i];
Live.getHead(url, function (url, info) {
resources[url] = info;
});
}
// add rule for morphing between old and new css files
var head = document.getElementsByTagName("head")[0],
style = document.createElement("style"),
rule = "transition: all .3s ease-out;"
css = [".livejs-loading * { ", rule, " -webkit-", rule, "-moz-", rule, "-o-", rule, "}"].join('');
style.setAttribute("type", "text/css");
head.appendChild(style);
style.styleSheet ? style.styleSheet.cssText = css : style.appendChild(document.createTextNode(css));
// yep
loaded = true;
},
// check all tracking resources for changes
checkForChanges: function () {
for (var url in resources) {
if (pendingRequests[url])
continue;
Live.getHead(url, function (url, newInfo) {
var oldInfo = resources[url],
hasChanged = false;
resources[url] = newInfo;
for (var header in oldInfo) {
// do verification based on the header type
var oldValue = oldInfo[header],
newValue = newInfo[header],
contentType = newInfo["Content-Type"];
switch (header.toLowerCase()) {
case "etag":
if (!newValue) break;
// fall through to default
default:
hasChanged = oldValue != newValue;
break;
}
// if changed, act
if (hasChanged) {
Live.refreshResource(url, contentType);
break;
}
}
});
}
},
// act upon a changed url of certain content type
refreshResource: function (url, type) {
switch (type.toLowerCase()) {
// css files can be reloaded dynamically by replacing the link element
case "text/css":
var link = currentLinkElements[url],
html = document.body.parentNode,
head = link.parentNode,
next = link.nextSibling,
newLink = document.createElement("link");
html.className = html.className.replace(/\s*livejs\-loading/gi, '') + ' livejs-loading';
newLink.setAttribute("type", "text/css");
newLink.setAttribute("rel", "stylesheet");
newLink.setAttribute("href", url + "?now=" + new Date() * 1);
next ? head.insertBefore(newLink, next) : head.appendChild(newLink);
currentLinkElements[url] = newLink;
oldLinkElements[url] = link;
// schedule removal of the old link
Live.removeoldLinkElements();
break;
// check if an html resource is our current url, then reload
case "text/html":
if (url != document.location.href)
return;
// local javascript changes cause a reload as well
case "text/javascript":
case "application/javascript":
case "application/x-javascript":
document.location.reload();
}
},
// removes the old stylesheet rules only once the new one has finished loading
removeoldLinkElements: function () {
var pending = 0;
for (var url in oldLinkElements) {
// if this sheet has any cssRules, delete the old link
try {
var link = currentLinkElements[url],
oldLink = oldLinkElements[url],
html = document.body.parentNode,
sheet = link.sheet || link.styleSheet,
rules = sheet.rules || sheet.cssRules;
if (rules.length >= 0) {
oldLink.parentNode.removeChild(oldLink);
delete oldLinkElements[url];
setTimeout(function () {
html.className = html.className.replace(/\s*livejs\-loading/gi, '');
}, 100);
}
} catch (e) {
pending++;
}
if (pending) setTimeout(Live.removeoldLinkElements, 50);
}
},
// performs a HEAD request and passes the header info to the given callback
getHead: function (url, callback) {
pendingRequests[url] = true;
var xhr = window.XMLHttpRequest ? new XMLHttpRequest() : new ActiveXObject("Microsoft.XmlHttp");
xhr.open("HEAD", url, true);
xhr.onreadystatechange = function () {
delete pendingRequests[url];
if (xhr.readyState == 4 && xhr.status != 304) {
xhr.getAllResponseHeaders();
var info = {};
for (var h in headers) {
var value = xhr.getResponseHeader(h);
// adjust the simple Etag variant to match on its significant part
if (h.toLowerCase() == "etag" && value) value = value.replace(/^W\//, '');
if (h.toLowerCase() == "content-type" && value) value = value.replace(/^(.*?);.*?$/i, "$1");
info[h] = value;
}
callback(url, info);
}
}
xhr.send();
}
};
// start listening
if (document.location.protocol != "file:") {
if (!window.liveJsLoaded)
Live.heartbeat();
window.liveJsLoaded = true;
}
else if (window.console)
console.log("Live.js doesn't support the file protocol. It needs http.");
})();
'''
|
{
"content_hash": "0928b159076d79a316bcaf05c7a1c987",
"timestamp": "",
"source": "github",
"line_count": 235,
"max_line_length": 106,
"avg_line_length": 35.2,
"alnum_prop": 0.566247582205029,
"repo_name": "cbednarski/icecake",
"id": "d6d4e47c1ce723a9bac34913fb4790cc08de32bf",
"size": "8272",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "icecake/livejs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7259"
},
{
"name": "Dockerfile",
"bytes": "841"
},
{
"name": "HTML",
"bytes": "2437"
},
{
"name": "Makefile",
"bytes": "755"
},
{
"name": "Python",
"bytes": "58341"
}
],
"symlink_target": ""
}
|
import os
import ssl
from oslo.config import cfg
from murano.openstack.common.gettextutils import _
ssl_opts = [
cfg.StrOpt('ca_file',
help="CA certificate file to use to verify "
"connecting clients."),
cfg.StrOpt('cert_file',
help="Certificate file to use when starting "
"the server securely."),
cfg.StrOpt('key_file',
help="Private key file to use when starting "
"the server securely."),
]
CONF = cfg.CONF
CONF.register_opts(ssl_opts, "ssl")
def is_enabled():
cert_file = CONF.ssl.cert_file
key_file = CONF.ssl.key_file
ca_file = CONF.ssl.ca_file
use_ssl = cert_file or key_file
if cert_file and not os.path.exists(cert_file):
raise RuntimeError(_("Unable to find cert_file : %s") % cert_file)
if ca_file and not os.path.exists(ca_file):
raise RuntimeError(_("Unable to find ca_file : %s") % ca_file)
if key_file and not os.path.exists(key_file):
raise RuntimeError(_("Unable to find key_file : %s") % key_file)
if use_ssl and (not cert_file or not key_file):
raise RuntimeError(_("When running server in SSL mode, you must "
"specify both a cert_file and key_file "
"option value in your configuration file"))
return use_ssl
def wrap(sock):
ssl_kwargs = {
'server_side': True,
'certfile': CONF.ssl.cert_file,
'keyfile': CONF.ssl.key_file,
'cert_reqs': ssl.CERT_NONE,
}
if CONF.ssl.ca_file:
ssl_kwargs['ca_certs'] = CONF.ssl.ca_file
ssl_kwargs['cert_reqs'] = ssl.CERT_REQUIRED
return ssl.wrap_socket(sock, **ssl_kwargs)
_SSL_PROTOCOLS = {
"tlsv1": ssl.PROTOCOL_TLSv1,
"sslv23": ssl.PROTOCOL_SSLv23,
"sslv3": ssl.PROTOCOL_SSLv3
}
try:
_SSL_PROTOCOLS["sslv2"] = ssl.PROTOCOL_SSLv2
except AttributeError:
pass
def validate_ssl_version(version):
key = version.lower()
try:
return _SSL_PROTOCOLS[key]
except KeyError:
raise RuntimeError(_("Invalid SSL version : %s") % version)
|
{
"content_hash": "eef82f40ff0a2ace6b5d2cb3ee8fb25b",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 74,
"avg_line_length": 26.604938271604937,
"alnum_prop": 0.5976798143851508,
"repo_name": "sergmelikyan/murano",
"id": "57862251154c89819a6fe93af4d0d8f194787494",
"size": "2757",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "murano/openstack/common/sslutils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PowerShell",
"bytes": "8634"
},
{
"name": "Python",
"bytes": "873914"
},
{
"name": "Shell",
"bytes": "5656"
}
],
"symlink_target": ""
}
|
from medoly import kanon
from medoly import anthem
@kanon.menu("/")
class HomeHandler(anthem.Handler):
entry_thing = kanon.Melos("thing:Entry")
def get(self):
entries = self.entry_thing.list_entries()
if not entries:
self.redirect("/compose")
return
self.render("home.html", entries=entries)
|
{
"content_hash": "9dc6a056e022881d3a03742e0ebd9de6",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 49,
"avg_line_length": 23.466666666666665,
"alnum_prop": 0.6306818181818182,
"repo_name": "whiteclover/Medoly",
"id": "d67dcb6af27b3d725698fb894b25b92643c529e5",
"size": "375",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/blog/blog/app/index.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "911"
},
{
"name": "Python",
"bytes": "165054"
}
],
"symlink_target": ""
}
|
'''
Created on Jan 17, 2016
@author: RJMurray
'''
# need: MUSSQ implementation, cluster definition, machine definition
import numpy as np
import random
class Controller():
def __init__(self, clusterSpecs):
# clusterSpecs is a list "numClusters" lists
self.numClusters = len(clusterSpecs)
self.memberClusters = []
for i in range(self.numClusters):
self.memberClusters.append(Cluster(i,clusterSpecs[i]))
class Cluster():
def __init__(self, inId, inMachineSpeeds):
inMachineSpeeds.sort(reverse=True)
self.numMachines = len(inMachineSpeeds)
self.machines = []
self.id = inId
for i in range(self.numMachines):
self.machines.append(Machine(i, inMachineSpeeds[i]))
def schedJobOnEarliestCompMachines(self, job, procReqs):
# right now, "job" isn't used (likely will be later)
# schedule one task at a time
if (not all(procReqs[i] >= procReqs[i+1] for i in xrange(len(procReqs)-1))):
procReqs.sort(reverse=True)
for pr in procReqs:
# identify the machine on which this task will finish first
bestMach = self.machines[0];
bestCompTime = np.inf
for i in range(self.numMachines):
compTime = self.machines[i].nextFree + (pr / self.machines[i].speed)
if (compTime <= bestCompTime):
bestCompTime = compTime
bestMach = self.machines[i]
# identified best machine; schedule this task!
bestMach.nextFree = bestCompTime
class Machine():
def __init__(self, inId, inSpeed):
self.speed = inSpeed
self.id = inId
self.nextFree = 0.0
def mussq(P,w):
# Inputs:
# P = 2D array matrix of processing times,
# one job per row (i.e. P[j,:] is data for job "j")
# one cluster per column (sub-jobs are 1-to-1 with clusters)
# w = an array of positive floats (w[j] is weight for job "j")
# Outputs:
# sigma = permutation of the [number-of-rows-of-P] jobs
# initialize
n = P.shape[0] # number of jobs
J = set(range(n)) # set of as-yet-unscheduled jobs
sigma = np.zeros([1,n], dtype=np.int)
L = np.sum(P, axis=0)
# bulk of routine
for k in reversed(range(n)):
bottleneck = np.argmax(L)
bestjob = random.sample(J)
smallestRatio = np.inf
for j in J:
if (P[j,bottleneck] > 0):
ratio = w[j] / P[j,bottleneck]
if (ratio <= smallestRatio):
bestjob = j
smallestRatio = ratio
sigma[k] = bestjob
theta = w[sigma[k]] / P[sigma[k],bottleneck]
for j in J:
w[j] = w[j] - theta * P[sigma[k],bottleneck]
L = L - P[sigma[k],:]
J.remove(sigma[k])
return sigma
|
{
"content_hash": "b7caa2055ce696adddb567c968836c39",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 84,
"avg_line_length": 32.65555555555556,
"alnum_prop": 0.5634569581490303,
"repo_name": "rileyjmurray/REUcode",
"id": "61d6ce367ec7fab016e16f8d5d0f1ece04fd307c",
"size": "2939",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ccSchedSim/cc_environment.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "69268"
},
{
"name": "Matlab",
"bytes": "82277"
}
],
"symlink_target": ""
}
|
"""Helper for building projects from source."""
from __future__ import unicode_literals
import datetime
import glob
import logging
import os
import platform
import re
import shutil
import subprocess
import tarfile
import zipfile
from l2tdevtools.build_helpers import interface
from l2tdevtools import dpkg_files
class DPKGBuildHelper(interface.BuildHelper):
"""Helper to build dpkg packages (.deb).
Attributes:
architecture (str): dpkg target architecture.
distribution (str): dpkg target distributions.
version_suffix (str): dpkg version suffix.
"""
_BUILD_DEPENDENCIES = frozenset([
'git',
'build-essential',
'autotools-dev',
'autoconf',
'automake',
'autopoint',
'dh-autoreconf',
'libtool',
'gettext',
'flex',
'byacc',
'debhelper',
'devscripts',
'dpkg-dev',
'fakeroot',
'quilt',
'python-all',
'python-all-dev',
'python-setuptools',
'python3-all',
'python3-all-dev',
'python3-setuptools',
])
_BUILD_DEPENDENCY_PACKAGE_NAMES = {
'bzip2': 'libbz2-dev',
'fuse': 'libfuse-dev',
'libcrypto': 'libssl-dev',
'sqlite': 'libsqlite3-dev',
'zeromq': 'libzmq3-dev',
'zlib': 'zlib1g-dev'
}
def __init__(self, project_definition, l2tdevtools_path):
"""Initializes a build helper.
Args:
project_definition (ProjectDefinition): project definition.
l2tdevtools_path (str): path to the l2tdevtools directory.
"""
super(DPKGBuildHelper, self).__init__(project_definition, l2tdevtools_path)
self._prep_script = 'prep-dpkg.sh'
self._post_script = 'post-dpkg.sh'
self.architecture = None
self.distribution = None
self.version_suffix = None
def _BuildPrepare(
self, source_directory, project_name, project_version, version_suffix,
distribution, architecture):
"""Make the necessary preparations before building the dpkg packages.
Args:
source_directory (str): name of the source directory.
project_name (str): name of the project.
project_version (str): version of the project.
version_suffix (str): version suffix.
distribution (str): distribution.
architecture (str): architecture.
Returns:
bool: True if the preparations were successful, False otherwise.
"""
# Script to run before building, e.g. to change the dpkg packaging files.
if os.path.exists(self._prep_script):
command = 'sh ../{0:s} {1:s} {2!s} {3:s} {4:s} {5:s}'.format(
self._prep_script, project_name, project_version, version_suffix,
distribution, architecture)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error('Running: "{0:s}" failed.'.format(command))
return False
return True
def _BuildFinalize(
self, source_directory, project_name, project_version, version_suffix,
distribution, architecture):
"""Make the necessary finalizations after building the dpkg packages.
Args:
source_directory (str): name of the source directory.
project_name (str): name of the project.
project_version (str): version of the project.
version_suffix (str): version suffix.
distribution (str): distribution.
architecture (str): architecture.
Returns:
bool: True if the finalizations were successful, False otherwise.
"""
# Script to run after building, e.g. to automatically upload the dpkg
# package files to an apt repository.
if os.path.exists(self._post_script):
command = 'sh ../{0:s} {1:s} {2!s} {3:s} {4:s} {5:s}'.format(
self._post_script, project_name, project_version, version_suffix,
distribution, architecture)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error('Running: "{0:s}" failed.'.format(command))
return False
return True
def _CheckIsInstalled(self, package_name):
"""Checks if a package is installed.
Args:
package_name (str): name of the package.
Returns:
bool: True if the package is installed, False otherwise.
"""
command = 'dpkg-query -s {0:s} >/dev/null 2>&1'.format(package_name)
exit_code = subprocess.call(command, shell=True)
return exit_code == 0
def _CreateOriginalSourcePackage(
self, source_filename, project_name, project_version):
"""Creates the .orig.tar.gz source package.
Args:
source_filename (str): name of the source package file.
project_name (str): project name.
project_version (str): version of the project.
"""
if self._project_definition.dpkg_source_name:
project_name = self._project_definition.dpkg_source_name
deb_orig_source_filename = '{0:s}_{1!s}.orig.tar.gz'.format(
project_name, project_version)
if self.version_suffix and self.distribution:
deb_orig_source_filename = '{0:s}_{1!s}{2:s}~{3:s}.orig.tar.gz'.format(
project_name, project_version, self.version_suffix, self.distribution)
if os.path.exists(deb_orig_source_filename):
return
if source_filename.endswith('.zip'):
self._CreateOriginalSourcePackageFromZip(
source_filename, deb_orig_source_filename)
else:
# TODO: add fix psutil package name.
shutil.copy(source_filename, deb_orig_source_filename)
def _CreateOriginalSourcePackageFromZip(
self, source_filename, orig_source_filename):
"""Creates the .orig.tar.gz source package from a .zip file.
Args:
source_filename (str): name of the source package file.
orig_source_filename (str): name of the .orig.tar.gz source package file.
"""
with zipfile.ZipFile(source_filename, 'r') as zip_file:
with tarfile.open(name=orig_source_filename, mode='w:gz') as tar_file:
for filename in zip_file.namelist():
with zip_file.open(filename) as file_object:
zip_info = zip_file.getinfo(filename)
tar_info = tarfile.TarInfo(filename)
tar_info.size = zip_info.file_size
# Populate modification times from zip file into tar archive,
# as launchpad refuses to build packages containing files with
# timestamps too far in the past.
date_time = zip_info.date_time
mtime = datetime.datetime(*date_time)
mtime = int((mtime - datetime.datetime(1970, 1, 1)).total_seconds())
tar_info.mtime = mtime
tar_file.addfile(tar_info, fileobj=file_object)
def _CreatePackagingFiles(
self, source_helper_object, source_directory, project_version):
"""Creates packaging files.
Args:
source_helper_object (SourceHelper): source helper.
source_directory (str): name of the source directory.
project_version (str): project version.
Returns:
bool: True if successful, False otherwise.
"""
debian_directory = os.path.join(source_directory, 'debian')
# If there is a debian directory remove it and recreate it from
# the dpkg directory.
if os.path.exists(debian_directory):
logging.info('Removing: {0:s}'.format(debian_directory))
shutil.rmtree(debian_directory)
dpkg_directory = os.path.join(source_directory, 'dpkg')
if not os.path.exists(dpkg_directory):
dpkg_directory = os.path.join(source_directory, 'config', 'dpkg')
if os.path.exists(dpkg_directory):
shutil.copytree(dpkg_directory, debian_directory)
else:
os.chdir(source_directory)
build_files_generator = dpkg_files.DPKGBuildFilesGenerator(
source_helper_object.project_name, project_version,
self._project_definition, self._data_path)
build_files_generator.GenerateFiles('debian')
os.chdir('..')
if not os.path.exists(debian_directory):
logging.error('Missing debian sub directory in: {0:s}'.format(
source_directory))
return False
return True
def _RemoveOlderDPKGPackages(self, project_name, project_version):
"""Removes previous versions of dpkg packages.
Args:
project_name (str): project name.
project_version (str): project version.
"""
filenames_to_ignore = '^{0:s}[-_].*{1!s}'.format(
project_name, project_version)
filenames_to_ignore = re.compile(filenames_to_ignore)
# Remove files of previous versions in the format:
# project[-_]*version-[1-9]_architecture.*
filenames_glob = '{0:s}[-_]*-[1-9]_{1:s}.*'.format(
project_name, self.architecture)
filenames = glob.glob(filenames_glob)
for filename in filenames:
if not filenames_to_ignore.match(filename):
logging.info('Removing: {0:s}'.format(filename))
os.remove(filename)
# Remove files of previous versions in the format:
# project[-_]*version-[1-9].*
filenames_glob = '{0:s}[-_]*-[1-9].*'.format(project_name)
filenames = glob.glob(filenames_glob)
for filename in filenames:
if not filenames_to_ignore.match(filename):
logging.info('Removing: {0:s}'.format(filename))
os.remove(filename)
def _RemoveOlderOriginalSourcePackage(
self, project_name, project_version, version_suffix=None,
distribution=None):
"""Removes previous versions of original source package.
Args:
project_name (str): name of the project.
project_version (str): version of the project.
version_suffix (str): version suffix.
distribution (str): distribution.
"""
if version_suffix and distribution:
filenames_to_ignore = '{0:s}_{1:s}{2:s}~{3:s}.orig.tar.gz'.format(
project_name, project_version, version_suffix, distribution)
else:
filenames_to_ignore = '^{0:s}_{1!s}.orig.tar.gz'.format(
project_name, project_version)
filenames_to_ignore = re.compile(filenames_to_ignore)
# Remove files of previous versions in the format:
# project_version.orig.tar.gz
if version_suffix and distribution:
filenames_glob = '{0:s}_*{1:s}~{2:s}.orig.tar.gz'.format(
project_name, version_suffix, distribution)
else:
filenames_glob = '{0:s}_*.orig.tar.gz'.format(project_name)
filenames = glob.glob(filenames_glob)
for filename in filenames:
if not filenames_to_ignore.match(filename):
logging.info('Removing: {0:s}'.format(filename))
os.remove(filename)
def _RemoveOlderSourceDPKGPackages(self, project_name, project_version):
"""Removes previous versions of source dpkg packages.
Args:
project_name (str): project name.
project_version (str): project version.
"""
filenames_to_ignore = '^{0:s}[-_].*{1!s}'.format(
project_name, project_version)
filenames_to_ignore = re.compile(filenames_to_ignore)
# Remove files of previous versions in the format:
# project[-_]version-[1-9]suffix~distribution_architecture.*
filenames_glob = '{0:s}[-_]*-[1-9]{1:s}~{2:s}_{3:s}.*'.format(
project_name, self.version_suffix, self.distribution, self.architecture)
filenames = glob.glob(filenames_glob)
for filename in filenames:
if not filenames_to_ignore.match(filename):
logging.info('Removing: {0:s}'.format(filename))
os.remove(filename)
# Remove files of previous versions in the format:
# project[-_]*version-[1-9]suffix~distribution.*
filenames_glob = '{0:s}[-_]*-[1-9]{1:s}~{2:s}.*'.format(
project_name, self.version_suffix, self.distribution)
filenames = glob.glob(filenames_glob)
for filename in filenames:
if not filenames_to_ignore.match(filename):
logging.info('Removing: {0:s}'.format(filename))
os.remove(filename)
def CheckBuildDependencies(self):
"""Checks if the build dependencies are met.
Returns:
list[str]: build dependency names that are not met or an empty list.
"""
missing_packages = []
for package_name in self._BUILD_DEPENDENCIES:
if not self._CheckIsInstalled(package_name):
missing_packages.append(package_name)
for package_name in self._project_definition.build_dependencies:
package_name = self._BUILD_DEPENDENCY_PACKAGE_NAMES.get(
package_name, [package_name])
if not self._CheckIsInstalled(package_name):
missing_packages.append(package_name)
if package_name not in (
self._project_definition.dpkg_build_dependencies):
self._project_definition.dpkg_build_dependencies.append(
package_name)
return missing_packages
class ConfigureMakeDPKGBuildHelper(DPKGBuildHelper):
"""Helper to build dpkg packages (.deb)."""
def __init__(self, project_definition, l2tdevtools_path):
"""Initializes a build helper.
Args:
project_definition (ProjectDefinition): project definition.
l2tdevtools_path (str): path to the l2tdevtools directory.
"""
super(ConfigureMakeDPKGBuildHelper, self).__init__(
project_definition, l2tdevtools_path)
self.architecture = platform.machine()
self.distribution = ''
self.version_suffix = ''
if self.architecture == 'i686':
self.architecture = 'i386'
elif self.architecture == 'x86_64':
self.architecture = 'amd64'
def Build(self, source_helper_object):
"""Builds the dpkg packages.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if successful, False otherwise.
"""
source_filename = source_helper_object.Download()
if not source_filename:
logging.info('Download of: {0:s} failed'.format(
source_helper_object.project_name))
return False
project_version = source_helper_object.GetProjectVersion()
# dpkg-buildpackage wants an source package filename without
# the status indication and orig indication.
self._CreateOriginalSourcePackage(
source_filename, source_helper_object.project_name, project_version)
source_directory = source_helper_object.Create()
if not source_directory:
logging.error(
'Extraction of source package: {0:s} failed'.format(source_filename))
return False
logging.info('Building deb of: {0:s}'.format(source_filename))
if not self._CreatePackagingFiles(
source_helper_object, source_directory, project_version):
return False
# If there is a temporary packaging directory remove it.
temporary_directory = os.path.join(source_directory, 'tmp')
if os.path.exists(temporary_directory):
logging.info('Removing: {0:s}'.format(temporary_directory))
shutil.rmtree(temporary_directory)
if not self._BuildPrepare(
source_directory, source_helper_object.project_name, project_version,
self.version_suffix, self.distribution, self.architecture):
return False
log_file_path = os.path.join('..', self.LOG_FILENAME)
command = 'dpkg-buildpackage -uc -us -rfakeroot > {0:s} 2>&1'.format(
log_file_path)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error('Running: "{0:s}" failed.'.format(command))
return False
if not self._BuildFinalize(
source_directory, source_helper_object.project_name, project_version,
self.version_suffix, self.distribution, self.architecture):
return False
return True
def CheckBuildRequired(self, source_helper_object):
"""Checks if a build is required.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if a build is required, False otherwise.
"""
project_version = source_helper_object.GetProjectVersion()
deb_filename = '{0:s}_{1!s}-1_{2:s}.deb'.format(
source_helper_object.project_name, project_version, self.architecture)
return not os.path.exists(deb_filename)
def Clean(self, source_helper_object):
"""Cleans the dpkg packages in the current directory.
Args:
source_helper_object (SourceHelper): source helper.
"""
project_version = source_helper_object.GetProjectVersion()
self._RemoveOlderOriginalSourcePackage(
source_helper_object.project_name, project_version)
self._RemoveOlderDPKGPackages(
source_helper_object.project_name, project_version)
class ConfigureMakeSourceDPKGBuildHelper(DPKGBuildHelper):
"""Helper to build source dpkg packages (.deb)."""
def __init__(self, project_definition, l2tdevtools_path):
"""Initializes a build helper.
Args:
project_definition (ProjectDefinition): project definition.
l2tdevtools_path (str): path to the l2tdevtools directory.
"""
super(ConfigureMakeSourceDPKGBuildHelper, self).__init__(
project_definition, l2tdevtools_path)
self._prep_script = 'prep-dpkg-source.sh'
self._post_script = 'post-dpkg-source.sh'
self.architecture = 'source'
self.distribution = 'trusty'
self.version_suffix = 'ppa1'
def Build(self, source_helper_object):
"""Builds the dpkg packages.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if successful, False otherwise.
"""
source_filename = source_helper_object.Download()
if not source_filename:
logging.info('Download of: {0:s} failed'.format(
source_helper_object.project_name))
return False
project_version = source_helper_object.GetProjectVersion()
self._CreateOriginalSourcePackage(
source_filename, source_helper_object.project_name, project_version)
source_directory = source_helper_object.Create()
if not source_directory:
logging.error(
'Extraction of source package: {0:s} failed'.format(source_filename))
return False
logging.info('Building source deb of: {0:s}'.format(source_filename))
if not self._CreatePackagingFiles(
source_helper_object, source_directory, project_version):
return False
# If there is a temporary packaging directory remove it.
temporary_directory = os.path.join(source_directory, 'tmp')
if os.path.exists(temporary_directory):
logging.info('Removing: {0:s}'.format(temporary_directory))
shutil.rmtree(temporary_directory)
if not self._BuildPrepare(
source_directory, source_helper_object.project_name, project_version,
self.version_suffix, self.distribution, self.architecture):
return False
log_file_path = os.path.join('..', self.LOG_FILENAME)
command = 'debuild -S -sa > {0:s} 2>&1'.format(log_file_path)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error(
'Failed to run: "(cd {0:s} && {1:s}" with exit code {2:d}.'.format(
source_directory, command, exit_code))
return False
if not self._BuildFinalize(
source_directory, source_helper_object.project_name, project_version,
self.version_suffix, self.distribution, self.architecture):
return False
return True
def CheckBuildRequired(self, source_helper_object):
"""Checks if a build is required.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if a build is required, False otherwise.
"""
project_version = source_helper_object.GetProjectVersion()
changes_filename = '{0:s}_{1!s}-1{2:s}~{3:s}_{4:s}.changes'.format(
source_helper_object.project_name, project_version,
self.version_suffix, self.distribution, self.architecture)
return not os.path.exists(changes_filename)
def Clean(self, source_helper_object):
"""Cleans the source dpkg packages in the current directory.
Args:
source_helper_object (SourceHelper): source helper.
"""
project_version = source_helper_object.GetProjectVersion()
self._RemoveOlderOriginalSourcePackage(
source_helper_object.project_name, project_version)
self._RemoveOlderSourceDPKGPackages(
source_helper_object.project_name, project_version)
class SetupPyDPKGBuildHelper(DPKGBuildHelper):
"""Helper to build dpkg packages (.deb)."""
def __init__(self, project_definition, l2tdevtools_path):
"""Initializes a build helper.
Args:
project_definition (ProjectDefinition): project definition.
l2tdevtools_path (str): path to the l2tdevtools directory.
"""
super(SetupPyDPKGBuildHelper, self).__init__(
project_definition, l2tdevtools_path)
self.architecture = platform.machine()
self.distribution = ''
self.version_suffix = ''
if not project_definition.architecture_dependent:
self.architecture = 'all'
elif self.architecture == 'i686':
self.architecture = 'i386'
elif self.architecture == 'x86_64':
self.architecture = 'amd64'
def _GetFilenameSafeProjectInformation(self, source_helper_object):
"""Determines the filename safe project name and version.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
tuple: contains:
* str: filename safe project name.
* str: version.
"""
if self._project_definition.dpkg_name:
project_name = self._project_definition.dpkg_name
else:
project_name = source_helper_object.project_name
if not project_name.startswith('python-'):
project_name = 'python-{0:s}'.format(project_name)
project_version = source_helper_object.GetProjectVersion()
if project_version and project_version.startswith('1!'):
# Remove setuptools epoch.
project_version = project_version[2:]
return project_name, project_version
def Build(self, source_helper_object):
"""Builds the dpkg packages.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if successful, False otherwise.
"""
source_filename = source_helper_object.Download()
if not source_filename:
logging.info('Download of: {0:s} failed'.format(
source_helper_object.project_name))
return False
project_name, project_version = self._GetFilenameSafeProjectInformation(
source_helper_object)
# dpkg-buildpackage wants an source package filename without
# the status indication and orig indication.
self._CreateOriginalSourcePackage(
source_filename, source_helper_object.project_name, project_version)
source_directory = source_helper_object.Create()
if not source_directory:
logging.error(
'Extraction of source package: {0:s} failed'.format(source_filename))
return False
logging.info('Building deb of: {0:s}'.format(source_filename))
if not self._CreatePackagingFiles(
source_helper_object, source_directory, project_version):
return False
# If there is a temporary packaging directory remove it.
temporary_directory = os.path.join(source_directory, 'tmp')
if os.path.exists(temporary_directory):
logging.info('Removing: {0:s}'.format(temporary_directory))
shutil.rmtree(temporary_directory)
if not self._BuildPrepare(
source_directory, project_name, project_version, self.version_suffix,
self.distribution, self.architecture):
return False
log_file_path = os.path.join('..', self.LOG_FILENAME)
command = 'dpkg-buildpackage -uc -us -rfakeroot > {0:s} 2>&1'.format(
log_file_path)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error(
'Failed to run: "(cd {0:s} && {1:s}" with exit code {2:d}.'.format(
source_directory, command, exit_code))
return False
if not self._BuildFinalize(
source_directory, project_name, project_version, self.version_suffix,
self.distribution, self.architecture):
return False
return True
def CheckBuildRequired(self, source_helper_object):
"""Checks if a build is required.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if a build is required, False otherwise.
"""
project_name, project_version = self._GetFilenameSafeProjectInformation(
source_helper_object)
deb_filename = '{0:s}_{1!s}-1_{2:s}.deb'.format(
project_name, project_version, self.architecture)
return not os.path.exists(deb_filename)
def Clean(self, source_helper_object):
"""Cleans the dpkg packages in the current directory.
Args:
source_helper_object (SourceHelper): source helper.
"""
project_name, project_version = self._GetFilenameSafeProjectInformation(
source_helper_object)
self._RemoveOlderOriginalSourcePackage(
source_helper_object.project_name, project_version)
self._RemoveOlderDPKGPackages(project_name, project_version)
if not self._IsPython2Only():
project_name = 'python3-{0:s}'.format(project_name[7])
self._RemoveOlderDPKGPackages(project_name, project_version)
class SetupPySourceDPKGBuildHelper(DPKGBuildHelper):
"""Helper to build source dpkg packages (.deb)."""
def __init__(self, project_definition, l2tdevtools_path):
"""Initializes a build helper.
Args:
project_definition (ProjectDefinition): project definition.
l2tdevtools_path (str): path to the l2tdevtools directory.
"""
super(SetupPySourceDPKGBuildHelper, self).__init__(
project_definition, l2tdevtools_path)
self._prep_script = 'prep-dpkg-source.sh'
self._post_script = 'post-dpkg-source.sh'
self.architecture = 'source'
self.distribution = 'trusty'
self.version_suffix = 'ppa1'
def _GetFilenameSafeProjectInformation(self, source_helper_object):
"""Determines the filename safe project name and version.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
tuple: contains:
* str: filename safe project name.
* str: version.
"""
if self._project_definition.dpkg_source_name:
project_name = self._project_definition.dpkg_source_name
else:
project_name = source_helper_object.project_name
if not project_name.startswith('python-'):
project_name = 'python-{0:s}'.format(project_name)
project_version = source_helper_object.GetProjectVersion()
if project_version and project_version.startswith('1!'):
# Remove setuptools epoch.
project_version = project_version[2:]
return project_name, project_version
def Build(self, source_helper_object):
"""Builds the dpkg packages.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if successful, False otherwise.
"""
source_filename = source_helper_object.Download()
if not source_filename:
logging.info('Download of: {0:s} failed'.format(
source_helper_object.project_name))
return False
project_name, project_version = self._GetFilenameSafeProjectInformation(
source_helper_object)
self._CreateOriginalSourcePackage(
source_filename, source_helper_object.project_name, project_version)
source_directory = source_helper_object.Create()
if not source_directory:
logging.error(
'Extraction of source package: {0:s} failed'.format(source_filename))
return False
logging.info('Building source deb of: {0:s}'.format(source_filename))
if not self._CreatePackagingFiles(
source_helper_object, source_directory, project_version):
return False
# If there is a temporary packaging directory remove it.
temporary_directory = os.path.join(source_directory, 'tmp')
if os.path.exists(temporary_directory):
logging.info('Removing: {0:s}'.format(temporary_directory))
shutil.rmtree(temporary_directory)
if not self._BuildPrepare(
source_directory, project_name, project_version, self.version_suffix,
self.distribution, self.architecture):
return False
log_file_path = os.path.join('..', self.LOG_FILENAME)
command = 'debuild -S -sa > {0:s} 2>&1'.format(log_file_path)
exit_code = subprocess.call('(cd {0:s} && {1:s})'.format(
source_directory, command), shell=True)
if exit_code != 0:
logging.error(
'Failed to run: "(cd {0:s} && {1:s}" with exit code {2:d}.'.format(
source_directory, command, exit_code))
if not self._BuildFinalize(
source_directory, project_name, project_version, self.version_suffix,
self.distribution, self.architecture):
return False
return True
def CheckBuildRequired(self, source_helper_object):
"""Checks if a build is required.
Args:
source_helper_object (SourceHelper): source helper.
Returns:
bool: True if a build is required, False otherwise.
"""
project_name, project_version = self._GetFilenameSafeProjectInformation(
source_helper_object)
changes_filename = '{0:s}_{1!s}-1{2:s}~{3:s}_{4:s}.changes'.format(
project_name, project_version, self.version_suffix, self.distribution,
self.architecture)
return not os.path.exists(changes_filename)
def Clean(self, source_helper_object):
"""Cleans the dpkg packages in the current directory.
Args:
source_helper_object (SourceHelper): source helper.
"""
project_version = source_helper_object.GetProjectVersion()
self._RemoveOlderOriginalSourcePackage(
source_helper_object.project_name, project_version,
version_suffix=self.version_suffix, distribution=self.distribution)
self._RemoveOlderSourceDPKGPackages(
source_helper_object.project_name, project_version)
|
{
"content_hash": "a8d8029aa9858e579cd7741d8773cc8e",
"timestamp": "",
"source": "github",
"line_count": 879,
"max_line_length": 80,
"avg_line_length": 34.10238907849829,
"alnum_prop": 0.6677341873498799,
"repo_name": "rgayon/l2tdevtools",
"id": "21bbdfec6b975a7a0fd952c7baaac98eafa96506",
"size": "30000",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "l2tdevtools/build_helpers/dpkg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "1333"
},
{
"name": "PowerShell",
"bytes": "1635"
},
{
"name": "Python",
"bytes": "519038"
},
{
"name": "Shell",
"bytes": "14276"
}
],
"symlink_target": ""
}
|
"""
This Bot uses the Updater class to handle the bot.
First, a few handler functions are defined. Then, those functions are passed to
the Dispatcher and registered at their respective places.
Then, the bot is started and runs until we press Ctrl-C on the command line.
Usage:
Basic Echobot example, repeats messages.
Press Ctrl-C on the command line or send a signal to the process to stop the
bot.
"""
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from logger import *
import urllib.request as req
import director
import argparse
dire = director.Director()
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def start(bot, update):
update.message.reply_text('Hi!')
def help(bot, update):
update.message.reply_text('Help!')
def echo(bot, update):
update.message.reply_text(update.message.text)
print(update.message.text)
def get_document(bot, update):
doc = update.message.document
if doc.mime_type != "audio/midi" and doc.mime_type != "audio/x-midi":
update.message.reply_text("Els meus músics només poden tocar música en MIDI! Els fitxers " + doc.mime_type +
" no sé com s'han d'interpretar i m'acabo tornant boig!")
return
f = bot.getFile(doc.file_id)
tmp_file = req.urlretrieve(f.file_path)
update.message.reply_text("🎵🎵 Ara tocaré la peça " + doc.file_name + " 🎵🎵")
play_midi(tmp_file[0])
update.message.reply_text("Ja he acabat de tocar la peça " + doc.file_name + " 😁")
def play_midi(file_path):
dire.play(file_path)
def error(bot, update, error):
logging.warning('Update "%s" caused error "%s"' % (update, error))
def main():
set_default_logger("bot.log")
parser = argparse.ArgumentParser(description="Start bot to play music with Pi Orchestra")
parser.add_argument("-d", "--debug", action="store_true", help="Print debug information")
args = parser.parse_args()
if args.debug:
logging.getLogger().setLevel(logging.DEBUG)
logging.debug("Starting in DEBUG mode")
# Create the EventHandler and pass it your bot's token.
updater = Updater("321792127:AAHE9cK06THBoeJFJav07ZwAYKFNKAmWZ9w")
# Get the dispatcher to register handlers
dp = updater.dispatcher
# on different commands - answer in Telegram
dp.add_handler(CommandHandler("start", start))
dp.add_handler(CommandHandler("help", help))
# on non-command i.e message - echo the message on Telegram
dp.add_handler(MessageHandler(Filters.text, echo))
dp.add_handler(MessageHandler(Filters.document, get_document))
# log all errors
dp.add_error_handler(error)
# Start the Bot
updater.start_polling()
# Run the bot until you press Ctrl-C or the process receives SIGINT,
# SIGTERM or SIGABRT. This should be used most of the time, since
# start_polling() is non-blocking and will stop the bot gracefully.
updater.idle()
if __name__ == '__main__':
main()
|
{
"content_hash": "1a67d6ff99e0c7ee0968de72363f824e",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 117,
"avg_line_length": 32.463157894736845,
"alnum_prop": 0.6968223086900129,
"repo_name": "jmigual/projecte2",
"id": "04472a37039e15fcf5bb3b01afb0bc641db483ed",
"size": "3269",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Code/bot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "34224"
},
{
"name": "Shell",
"bytes": "888"
},
{
"name": "TeX",
"bytes": "29898"
}
],
"symlink_target": ""
}
|
"""
websocket - WebSocket client library for Python
Copyright (C) 2010 Hiroki Ohtani(liris)
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1335 USA
"""
import array
import os
import struct
import six
from ._exceptions import *
from ._utils import validate_utf8
try:
# If wsaccel is available we use compiled routines to mask data.
from wsaccel.xormask import XorMaskerSimple
def _mask(_m, _d):
return XorMaskerSimple(_m).process(_d)
except ImportError:
# wsaccel is not available, we rely on python implementations.
def _mask(_m, _d):
for i in range(len(_d)):
_d[i] ^= _m[i % 4]
if six.PY3:
return _d.tobytes()
else:
return _d.tostring()
__all__ = [
'ABNF', 'continuous_frame', 'frame_buffer',
'STATUS_NORMAL',
'STATUS_GOING_AWAY',
'STATUS_PROTOCOL_ERROR',
'STATUS_UNSUPPORTED_DATA_TYPE',
'STATUS_STATUS_NOT_AVAILABLE',
'STATUS_ABNORMAL_CLOSED',
'STATUS_INVALID_PAYLOAD',
'STATUS_POLICY_VIOLATION',
'STATUS_MESSAGE_TOO_BIG',
'STATUS_INVALID_EXTENSION',
'STATUS_UNEXPECTED_CONDITION',
'STATUS_BAD_GATEWAY',
'STATUS_TLS_HANDSHAKE_ERROR',
]
# closing frame status codes.
STATUS_NORMAL = 1000
STATUS_GOING_AWAY = 1001
STATUS_PROTOCOL_ERROR = 1002
STATUS_UNSUPPORTED_DATA_TYPE = 1003
STATUS_STATUS_NOT_AVAILABLE = 1005
STATUS_ABNORMAL_CLOSED = 1006
STATUS_INVALID_PAYLOAD = 1007
STATUS_POLICY_VIOLATION = 1008
STATUS_MESSAGE_TOO_BIG = 1009
STATUS_INVALID_EXTENSION = 1010
STATUS_UNEXPECTED_CONDITION = 1011
STATUS_BAD_GATEWAY = 1014
STATUS_TLS_HANDSHAKE_ERROR = 1015
VALID_CLOSE_STATUS = (
STATUS_NORMAL,
STATUS_GOING_AWAY,
STATUS_PROTOCOL_ERROR,
STATUS_UNSUPPORTED_DATA_TYPE,
STATUS_INVALID_PAYLOAD,
STATUS_POLICY_VIOLATION,
STATUS_MESSAGE_TOO_BIG,
STATUS_INVALID_EXTENSION,
STATUS_UNEXPECTED_CONDITION,
STATUS_BAD_GATEWAY,
)
class ABNF(object):
"""
ABNF frame class.
see http://tools.ietf.org/html/rfc5234
and http://tools.ietf.org/html/rfc6455#section-5.2
"""
# operation code values.
OPCODE_CONT = 0x0
OPCODE_TEXT = 0x1
OPCODE_BINARY = 0x2
OPCODE_CLOSE = 0x8
OPCODE_PING = 0x9
OPCODE_PONG = 0xa
# available operation code value tuple
OPCODES = (OPCODE_CONT, OPCODE_TEXT, OPCODE_BINARY, OPCODE_CLOSE,
OPCODE_PING, OPCODE_PONG)
# opcode human readable string
OPCODE_MAP = {
OPCODE_CONT: "cont",
OPCODE_TEXT: "text",
OPCODE_BINARY: "binary",
OPCODE_CLOSE: "close",
OPCODE_PING: "ping",
OPCODE_PONG: "pong"
}
# data length threshold.
LENGTH_7 = 0x7e
LENGTH_16 = 1 << 16
LENGTH_63 = 1 << 63
def __init__(self, fin=0, rsv1=0, rsv2=0, rsv3=0,
opcode=OPCODE_TEXT, mask=1, data=""):
"""
Constructor for ABNF.
please check RFC for arguments.
"""
self.fin = fin
self.rsv1 = rsv1
self.rsv2 = rsv2
self.rsv3 = rsv3
self.opcode = opcode
self.mask = mask
if data is None:
data = ""
self.data = data
self.get_mask_key = os.urandom
def validate(self, skip_utf8_validation=False):
"""
validate the ABNF frame.
skip_utf8_validation: skip utf8 validation.
"""
if self.rsv1 or self.rsv2 or self.rsv3:
raise WebSocketProtocolException("rsv is not implemented, yet")
if self.opcode not in ABNF.OPCODES:
raise WebSocketProtocolException("Invalid opcode %r", self.opcode)
if self.opcode == ABNF.OPCODE_PING and not self.fin:
raise WebSocketProtocolException("Invalid ping frame.")
if self.opcode == ABNF.OPCODE_CLOSE:
l = len(self.data)
if not l:
return
if l == 1 or l >= 126:
raise WebSocketProtocolException("Invalid close frame.")
if l > 2 and not skip_utf8_validation and not validate_utf8(self.data[2:]):
raise WebSocketProtocolException("Invalid close frame.")
code = 256 * \
six.byte2int(self.data[0:1]) + six.byte2int(self.data[1:2])
if not self._is_valid_close_status(code):
raise WebSocketProtocolException("Invalid close opcode.")
@staticmethod
def _is_valid_close_status(code):
return code in VALID_CLOSE_STATUS or (3000 <= code < 5000)
def __str__(self):
return "fin=" + str(self.fin) \
+ " opcode=" + str(self.opcode) \
+ " data=" + str(self.data)
@staticmethod
def create_frame(data, opcode, fin=1):
"""
create frame to send text, binary and other data.
data: data to send. This is string value(byte array).
if opcode is OPCODE_TEXT and this value is unicode,
data value is converted into unicode string, automatically.
opcode: operation code. please see OPCODE_XXX.
fin: fin flag. if set to 0, create continue fragmentation.
"""
if opcode == ABNF.OPCODE_TEXT and isinstance(data, six.text_type):
data = data.encode("utf-8")
# mask must be set if send data from client
return ABNF(fin, 0, 0, 0, opcode, 1, data)
def format(self):
"""
format this object to string(byte array) to send data to server.
"""
if any(x not in (0, 1) for x in [self.fin, self.rsv1, self.rsv2, self.rsv3]):
raise ValueError("not 0 or 1")
if self.opcode not in ABNF.OPCODES:
raise ValueError("Invalid OPCODE")
length = len(self.data)
if length >= ABNF.LENGTH_63:
raise ValueError("data is too long")
frame_header = chr(self.fin << 7
| self.rsv1 << 6 | self.rsv2 << 5 | self.rsv3 << 4
| self.opcode)
if length < ABNF.LENGTH_7:
frame_header += chr(self.mask << 7 | length)
frame_header = six.b(frame_header)
elif length < ABNF.LENGTH_16:
frame_header += chr(self.mask << 7 | 0x7e)
frame_header = six.b(frame_header)
frame_header += struct.pack("!H", length)
else:
frame_header += chr(self.mask << 7 | 0x7f)
frame_header = six.b(frame_header)
frame_header += struct.pack("!Q", length)
if not self.mask:
return frame_header + self.data
else:
mask_key = self.get_mask_key(4)
return frame_header + self._get_masked(mask_key)
def _get_masked(self, mask_key):
s = ABNF.mask(mask_key, self.data)
if isinstance(mask_key, six.text_type):
mask_key = mask_key.encode('utf-8')
return mask_key + s
@staticmethod
def mask(mask_key, data):
"""
mask or unmask data. Just do xor for each byte
mask_key: 4 byte string(byte).
data: data to mask/unmask.
"""
if data is None:
data = ""
if isinstance(mask_key, six.text_type):
mask_key = six.b(mask_key)
if isinstance(data, six.text_type):
data = six.b(data)
_m = array.array("B", mask_key)
_d = array.array("B", data)
return _mask(_m, _d)
class frame_buffer(object):
_HEADER_MASK_INDEX = 5
_HEADER_LENGTH_INDEX = 6
def __init__(self, recv_fn, skip_utf8_validation):
self.recv = recv_fn
self.skip_utf8_validation = skip_utf8_validation
# Buffers over the packets from the layer beneath until desired amount
# bytes of bytes are received.
self.recv_buffer = []
self.clear()
def clear(self):
self.header = None
self.length = None
self.mask = None
def has_received_header(self):
return self.header is None
def recv_header(self):
header = self.recv_strict(2)
b1 = header[0]
if six.PY2:
b1 = ord(b1)
fin = b1 >> 7 & 1
rsv1 = b1 >> 6 & 1
rsv2 = b1 >> 5 & 1
rsv3 = b1 >> 4 & 1
opcode = b1 & 0xf
b2 = header[1]
if six.PY2:
b2 = ord(b2)
has_mask = b2 >> 7 & 1
length_bits = b2 & 0x7f
self.header = (fin, rsv1, rsv2, rsv3, opcode, has_mask, length_bits)
def has_mask(self):
if not self.header:
return False
return self.header[frame_buffer._HEADER_MASK_INDEX]
def has_received_length(self):
return self.length is None
def recv_length(self):
bits = self.header[frame_buffer._HEADER_LENGTH_INDEX]
length_bits = bits & 0x7f
if length_bits == 0x7e:
v = self.recv_strict(2)
self.length = struct.unpack("!H", v)[0]
elif length_bits == 0x7f:
v = self.recv_strict(8)
self.length = struct.unpack("!Q", v)[0]
else:
self.length = length_bits
def has_received_mask(self):
return self.mask is None
def recv_mask(self):
self.mask = self.recv_strict(4) if self.has_mask() else ""
def recv_frame(self):
# Header
if self.has_received_header():
self.recv_header()
(fin, rsv1, rsv2, rsv3, opcode, has_mask, _) = self.header
# Frame length
if self.has_received_length():
self.recv_length()
length = self.length
# Mask
if self.has_received_mask():
self.recv_mask()
mask = self.mask
# Payload
payload = self.recv_strict(length)
if has_mask:
payload = ABNF.mask(mask, payload)
# Reset for next frame
self.clear()
frame = ABNF(fin, rsv1, rsv2, rsv3, opcode, has_mask, payload)
frame.validate(self.skip_utf8_validation)
return frame
def recv_strict(self, bufsize):
shortage = bufsize - sum(len(x) for x in self.recv_buffer)
while shortage > 0:
# Limit buffer size that we pass to socket.recv() to avoid
# fragmenting the heap -- the number of bytes recv() actually
# reads is limited by socket buffer and is relatively small,
# yet passing large numbers repeatedly causes lots of large
# buffers allocated and then shrunk, which results in
# fragmentation.
bytes_ = self.recv(min(16384, shortage))
self.recv_buffer.append(bytes_)
shortage -= len(bytes_)
unified = six.b("").join(self.recv_buffer)
if shortage == 0:
self.recv_buffer = []
return unified
else:
self.recv_buffer = [unified[bufsize:]]
return unified[:bufsize]
class continuous_frame(object):
def __init__(self, fire_cont_frame, skip_utf8_validation):
self.fire_cont_frame = fire_cont_frame
self.skip_utf8_validation = skip_utf8_validation
self.cont_data = None
self.recving_frames = None
def validate(self, frame):
if not self.recving_frames and frame.opcode == ABNF.OPCODE_CONT:
raise WebSocketProtocolException("Illegal frame")
if self.recving_frames and \
frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
raise WebSocketProtocolException("Illegal frame")
def add(self, frame):
if self.cont_data:
self.cont_data[1] += frame.data
else:
if frame.opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
self.recving_frames = frame.opcode
self.cont_data = [frame.opcode, frame.data]
if frame.fin:
self.recving_frames = None
def is_fire(self, frame):
return frame.fin or self.fire_cont_frame
def extract(self, frame):
data = self.cont_data
self.cont_data = None
frame.data = data[1]
if not self.fire_cont_frame and data[0] == ABNF.OPCODE_TEXT and not self.skip_utf8_validation and not validate_utf8(frame.data):
raise WebSocketPayloadException(
"cannot decode: " + repr(frame.data))
return [data[0], frame]
|
{
"content_hash": "1e2fca47b37f85c663e758e4d8e10a17",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 136,
"avg_line_length": 30.687203791469194,
"alnum_prop": 0.587027027027027,
"repo_name": "jarv/cmdchallenge-site",
"id": "eb07536d8c3cfacf3b8417117a5b45c13e9f8425",
"size": "12950",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "lambda_src/runcmd/websocket/_abnf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "126532"
},
{
"name": "HCL",
"bytes": "18422"
},
{
"name": "HTML",
"bytes": "8985"
},
{
"name": "JavaScript",
"bytes": "62058"
},
{
"name": "Makefile",
"bytes": "212"
},
{
"name": "Perl",
"bytes": "3915"
},
{
"name": "Python",
"bytes": "1855146"
},
{
"name": "Shell",
"bytes": "5758"
}
],
"symlink_target": ""
}
|
def cross(A, B):
"Cross product of elements in A and elements in B."
return [a+b for a in A for b in B]
digits = '123456789'
rows = 'ABCDEFGHI'
cols = digits
squares = cross(rows, cols)
unitlist = ([cross(rows, c) for c in cols] +
[cross(r, cols) for r in rows] +
[cross(rs, cs) for rs in ('ABC','DEF','GHI') for cs in ('123','456','789')])
units = dict((s, [u for u in unitlist if s in u])
for s in squares)
peers = dict((s, set(sum(units[s],[]))-set([s]))
for s in squares)
def test():
"A set of unit tests."
assert(len(squares) == 81)
assert(len(unitlist) == 27)
assert(all(len(units[s]) == 3 for s in squares))
assert(all(len(peers[s]) == 20 for s in squares))
assert(units['C2'] == [['A2', 'B2', 'C2', 'D2', 'E2', 'F2', 'G2', 'H2', 'I2'],
['C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9'],
['A1', 'A2', 'A3', 'B1', 'B2', 'B3', 'C1', 'C2', 'C3']])
assert(peers['C2'] == set(['A2', 'B2', 'D2', 'E2', 'F2', 'G2', 'H2', 'I2',
'C1', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9',
'A1', 'A3', 'B1', 'B3']))
print('All tests pass.')
test()
def grid_values(grid):
"Convert grid into a dict of {square: char} with '0' or '.' for empties."
chars = [c for c in grid if c in digits or c in '0.']
assert(len(chars) == 81)
return dict(zip(squares, chars))
def parse_grid(grid):
"""Convert grid to a dict of possible values, {square: digits}, or
return False if a contradiction is detected."""
## To start, every square can be any digit; then assign values from the grid.
values = dict((s, digits) for s in squares)
for s,d in grid_values(grid).items():
if d in digits and not assign(values, s, d):
return False ## (Fail if we can't assign d to square s.)
return values
def assign(values, s, d):
"""Eliminate all the other values (except d) from values[s] and propagate.
Return values, except return False if a contradiction is detected."""
other_values = values[s].replace(d, '')
if all(eliminate(values, s, d2) for d2 in other_values):
return values
else:
return False
def eliminate(values, s, d):
"""Eliminate d from values[s]; propagate when values or places <= 2.
Return values, except return False if a contradiction is detected."""
if d not in values[s]:
return values ## Already eliminated
values[s] = values[s].replace(d,'')
## (1) If a square s is reduced to one value d2, then eliminate d2 from the peers.
if len(values[s]) == 0:
return False ## Contradiction: removed last value
elif len(values[s]) == 1:
d2 = values[s]
if not all(eliminate(values, s2, d2) for s2 in peers[s]):
return False
## (2) If a unit u is reduced to only one place for a value d, then put it there.
for u in units[s]:
dplaces = [s for s in u if d in values[s]]
if len(dplaces) == 0:
return False ## Contradiction: no place for this value
elif len(dplaces) == 1:
# d can only be in one place in unit; assign it there
if not assign(values, dplaces[0], d):
return False
return values
def solve(grid): return search(parse_grid(grid))
def search(values):
"Using depth-first search and propagation, try all possible values."
if values is False:
return False ## Failed earlier
if all(len(values[s]) == 1 for s in squares):
return values ## Solved!
## Chose the unfilled square s with the fewest possibilities
n,s = min((len(values[s]), s) for s in squares if len(values[s]) > 1)
return some(search(assign(values.copy(), s, d))
for d in values[s])
def some(seq):
"Return some element of seq that is true."
for e in seq:
if e: return e
return False
puzzles = []
current = ''
with open('p096_sudoku.txt') as f:
for row in f.readlines():
if len(row) == 10:
current += row
elif len(row) == 9:
current += row + '\n'
puzzles.append(solve(current))
elif len(row) == 8:
if current:
puzzles.append(solve(current))
current = ''
print(sum(map(lambda x: int(x['A1'] + x['A2'] + x['A3']), puzzles)))
|
{
"content_hash": "ab40f661c157bcbce5fccc00d64cbea5",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 88,
"avg_line_length": 37.58974358974359,
"alnum_prop": 0.5625284220100045,
"repo_name": "jialing3/corner_cases",
"id": "2e72b1b3031dfbd3edd76e7032ab734175265842",
"size": "4432",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Relue/Eu96.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "22409"
},
{
"name": "Jupyter Notebook",
"bytes": "174621"
},
{
"name": "Python",
"bytes": "98659"
},
{
"name": "Scala",
"bytes": "749"
}
],
"symlink_target": ""
}
|
'''
Created on 26 May 2013
@author: lukasz.forynski
@brief: Implementation of the multi-key dictionary.
https://github.com/formiaczek/python_data_structures
___________________________________
Copyright (c) 2013 Lukasz Forynski <lukasz.forynski@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy of this
software and associated documentation files (the "Software"), to deal in the Software
without restriction, including without limitation the rights to use, copy, modify, merge,
publish, distribute, sub-license, and/or sell copies of the Software, and to permit persons
to whom the Software is furnished to do so, subject to the following conditions:
- The above copyright notice and this permission notice shall be included in all copies
or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE
FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
'''
class multi_key_dict(object):
""" Purpose of this type is to provie a multi-key dictionary.
This kind of dictionary has a similar interface to the standard dictionary, and indeed if used
with single key key elements - it's behaviour is the same as for a standard dict().
However it also allows for creation elements using multiple keys (using tuples/lists).
Such elements can be accessed using either of those keys (e.g read/updated/deleted).
Dictionary provides also extended interface for iterating over items and keys by the key type.
This can be useful e.g.: when creating dictionaries with (index,name) allowing to iterate over
items using either: names or indexes. It can be useful for many many other similar use-cases,
and there is no limit to the number of keys used to map to the value.
There are also methods to find other keys mapping to the same value as the specified keys etc.
Refer to examples and test code to see it in action.
simple example:
k = multi_key_dict()
k[100] = 'hundred' # add item to the dictionary (as for normal dictionary)
# but also:
# below creates entry with two possible key types: int and str,
# mapping all keys to the assigned value
k[1000, 'kilo', 'k'] = 'kilo (x1000)'
print k[1000] # will print 'kilo (x1000)'
print k['k'] # will also print 'kilo (x1000)'
# the same way objects can be updated, and if an object is updated using one key, the new value will
# be accessible using any other key, e.g. for example above:
k['kilo'] = 'kilo'
print k[1000] # will print 'kilo' as value was updated
"""
def __getitem__(self, key):
""" Return the value at index specified as key."""
if self.has_key(key):
return self.items_dict[self.__dict__[str(type(key))][key]]
else:
raise KeyError(key)
def __setitem__(self, keys, value):
""" Set the value at index (or list of indexes) specified as keys.
Note, that if multiple key list is specified, either:
- none of keys should map to an existing item already (item creation), or
- all of keys should map to exactly the same item (as previously created)
(item update)
If this is not the case - KeyError is raised. """
if(type(keys) in [tuple, list]):
num_of_keys_we_have = reduce(lambda x, y: x+y, map(lambda x : self.has_key(x), keys))
if num_of_keys_we_have:
all_select_same_item = True
direct_key = None
for key in keys:
key_type = str(type(key))
try:
if not direct_key:
direct_key = self.__dict__[key_type][key]
else:
new = self.__dict__[key_type][key]
if new != direct_key:
all_select_same_item = False
break
except Exception, err:
all_select_same_item = False
break;
if not all_select_same_item:
raise KeyError(', '.join(str(key) for key in keys))
first_key = keys[0] # combination if keys is allowed, simply use the first one
else:
first_key = keys
key_type = str(type(first_key)) # find the intermediate dictionary..
if self.has_key(first_key):
self.items_dict[self.__dict__[key_type][first_key]] = value # .. and update the object if it exists..
else:
if(type(keys) not in [tuple, list]):
key = keys
keys = [keys]
self.__add_item(value, keys) # .. or create it - if it doesn't
def __delitem__(self, key):
""" Called to implement deletion of self[key]."""
key_type = str(type(key))
if (self.has_key(key) and
self.items_dict and
self.items_dict.has_key(self.__dict__[key_type][key])):
intermediate_key = self.__dict__[key_type][key]
# remove the item in main dictionary
del self.items_dict[intermediate_key]
# remove all references (also pointed by other types of keys)
# for the item that this key pointed to.
for name, reference_dict in self.__dict__.iteritems():
if(type(name) == str and name.find('<type') == 0):
ref_key = None
for temp_key, value in reference_dict.iteritems():
if value == intermediate_key:
ref_key = temp_key
break
if ref_key:
del reference_dict[ref_key]
else:
raise KeyError(key)
def has_key(self, key):
""" Returns True if this object contains an item referenced by the key."""
key_type = str(type(key))
if self.__dict__.has_key(key_type):
if self.__dict__[key_type].has_key(key):
return True
return False
def get_other_keys(self, key, including_current=False):
""" Returns list of other keys that are mapped to the same value as specified key.
@param key - key for which other keys should be returned.
@param including_current if set to True - key will also appear on this list."""
other_keys = []
if self.has_key(key):
intermediate_key = self.__dict__[str(type(key))][key]
other_keys.extend(self.__all_keys_from_intermediate_key(intermediate_key))
if not including_current:
other_keys.remove(key)
return other_keys
def iteritems(self, key_type=None, return_all_keys=False):
""" Returns an iterator over the dictionary's (key, value) pairs.
@param key_type if specified, iterator will be returning only (key,value) pairs for this type of key.
Otherwise (if not specified) ((keys,...), value)
i.e. (tuple of keys, values) pairs for all items in this dictionary will be generated.
@param return_all_keys if set to True - tuple of keys is retuned instead of a key of this type."""
if key_type is not None:
key = str(key_type)
if self.__dict__.has_key(key):
for key, intermediate_key in self.__dict__[key].iteritems():
if return_all_keys:
keys = self.__all_keys_from_intermediate_key(intermediate_key)
yield keys, self.items_dict[intermediate_key]
else:
yield key, self.items_dict[intermediate_key]
else:
for multi_key_type, value in self.items_dict.iteritems():
keys = self.__all_keys_from_intermediate_key(multi_key_type)
yield keys, value
def iterkeys(self, key_type=None, return_all_keys=False):
""" Returns an iterator over the dictionary's keys.
@param key_type if specified, iterator for a dictionary of this type will be used.
Otherwise (if not specified) tuples containing all (multiple) keys
for this dictionary will be generated.
@param return_all_keys if set to True - tuple of keys is retuned instead of a key of this type."""
if(key_type is not None):
the_key = str(key_type)
if self.__dict__.has_key(the_key):
for key in self.__dict__[the_key].iterkeys():
if return_all_keys:
intermediate_key = self.__dict__[the_key][key]
keys = self.__all_keys_from_intermediate_key(intermediate_key)
yield keys
else:
yield key
else:
for multi_key_type in self.items_dict.keys():
yield self.__all_keys_from_intermediate_key(multi_key_type)
def itervalues(self, key_type=None):
""" Returns an iterator over the dictionary's values.
@param key_type if specified, iterator will be returning only values pointed by keys of this type.
Otherwise (if not specified) all values in this dictinary will be generated."""
if(key_type is not None):
intermediate_key = str(key_type)
if self.__dict__.has_key(intermediate_key):
for direct_key in self.__dict__[intermediate_key].itervalues():
yield self.items_dict[direct_key]
else:
for value in self.items_dict.itervalues():
yield value
def items(self, key_type=None, return_all_keys=False):
""" Return a copy of the dictionary's list of (key, value) pairs.
@param key_type if specified, (key, value) pairs for keys of this type will be returned.
Otherwise list of pairs: ((keys), value) for all items will be returned.
@param return_all_keys if set to True - tuple of keys is retuned instead of a key of this type."""
all_items = []
if key_type is not None:
keys_used_so_far = set()
direct_key = str(key_type)
if self.__dict__.has_key(direct_key):
for key, intermediate_key in self.__dict__[direct_key].iteritems():
if not intermediate_key in keys_used_so_far:
keys_used_so_far.add(intermediate_key)
if return_all_keys:
keys = self.__all_keys_from_intermediate_key(intermediate_key)
all_items.append((keys, self.items_dict[intermediate_key]))
else:
all_items.append((key, self.items_dict[intermediate_key]))
else:
for multi_key_type, value in self.items_dict.iteritems():
all_items.append((self.__all_keys_from_intermediate_key(multi_key_type), value))
return all_items
def keys(self, key_type=None):
""" Returns a copy of the dictionary's keys.
@param key_type if specified, only keys for this type will be returned.
Otherwise list of tuples containing all (multiple) keys will be returned."""
if key_type is not None:
intermediate_key = str(key_type)
if self.__dict__.has_key(intermediate_key):
return self.__dict__[intermediate_key].keys()
else:
# keys will contain lists of keys
all_keys = []
for multi_key_type in self.items_dict.keys():
all_keys.append(self.__all_keys_from_intermediate_key(multi_key_type))
return all_keys
def values(self, key_type=None):
""" Returns a copy of the dictionary's values.
@param key_type if specified, only values pointed by keys of this type will be returned.
Otherwise list of all values contained in this dictionary will be returned."""
if(key_type is not None):
all_items = []
keys_used = set()
direct_key = str(key_type)
if self.__dict__.has_key(direct_key):
for intermediate_key in self.__dict__[direct_key].itervalues():
if not intermediate_key in keys_used:
all_items.append(self.items_dict[intermediate_key])
keys_used.add(intermediate_key)
return all_items
else:
return self.items_dict.values()
def __len__(self):
""" Returns number of objects in dictionary."""
length = 0
if self.__dict__.has_key('items_dict'):
length = len(self.items_dict)
return length
def __add_item(self, item, keys=None):
""" Internal method to add an item to the multi-key dictionary"""
if(not keys or not len(keys)):
raise Exception('Error in %s.__add_item(%s, keys=tuple/list of items): need to specify a tuple/list containing at least one key!'
% (self.__class__.__name__, str(item)))
# joined values of keys will be used as a direct key. We'll encode type and key too..
direct_key = '`'.join([key.__class__.__name__+':' +str(key) for key in keys])
for key in keys:
key_type = str(type(key))
# store direct key as a value in an intermediate dictionary
if(not self.__dict__.has_key(key_type)):
self.__setattr__(key_type, dict())
self.__dict__[key_type][key] = direct_key
# store the value in the actual dictionary
if(not self.__dict__.has_key('items_dict')):
self.items_dict = dict()
self.items_dict[direct_key] = item
def __all_keys_from_intermediate_key(self, intermediate_key):
""" Internal method to find the tuple containing multiple keys"""
keys = []
# since we're trying to reverse-find keys for a value in number of dicts,
# (which is far from optimal, but re-creating objects from the intermediate keys
# doesn't work for more complex types loaded from sub-modules) - at least we'll
# try do that only for a correct dictionary (and not all of them)
key_types = set([tv.split(':', 1)[0] for tv in intermediate_key.split('`')])
is_correct_dict = lambda key: True in [str(key).startswith('<type \'%s' % k) for k in key_types]
for key, val in self.__dict__.items():
if type(val) == dict and is_correct_dict(key):
keys.extend([k for k, v in val.items() if v == intermediate_key])
return(tuple(keys))
def get(self, key, default=None):
""" Return the value at index specified as key."""
if self.has_key(key):
return self.items_dict[self.__dict__[str(type(key))][key]]
else:
return default
def __str__(self):
items = []
str_repr = lambda x: '\'%s\'' % x if type(x) == str else str(x)
if hasattr(self, 'items_dict'):
for (keys, value) in self.items():
keys_str = [str_repr(k) for k in keys]
items.append('(%s): %s' % (', '.join(keys_str),
str_repr(value)))
dict_str = '{%s}' % ( ', '.join(items))
return dict_str
def test_multi_key_dict():
contains_all = lambda cont, in_items: not (False in [c in cont for c in in_items])
m = multi_key_dict()
assert( len(m) == 0 ), 'expected len(m) == 0'
all_keys = list()
m['aa', 12, 32, 'mmm'] = 123 # create a value with multiple keys..
assert( len(m) == 1 ), 'expected len(m) == 1'
all_keys.append(('aa', 'mmm', 32, 12)) # store it for later
# try retrieving other keys mapped to the same value using one of them
res = m.get_other_keys('aa')
assert(contains_all(res, ['mmm', 32, 12])), 'get_other_keys(\'aa\'): %s other than expected: %s ' % (m, ['mmm', 32, 12])
# try retrieving other keys mapped to the same value using one of them: also include this key
res = m.get_other_keys(32, True)
assert(contains_all(res, ['aa', 'mmm', 32, 12])), 'get_other_keys(32): %s other than expected: %s ' % (res, ['aa', 'mmm', 32, 12])
assert( m.has_key('aa') == True ), 'expected m.has_key(\'aa\') == True'
assert( m.has_key('aab') == False ), 'expected m.has_key(\'aab\') == False'
assert( m.has_key(12) == True ), 'expected m.has_key(12) == True'
assert( m.has_key(13) == False ), 'expected m.has_key(13) == False'
assert( m.has_key(32) == True ), 'expected m.has_key(32) == True'
m['something else'] = 'abcd'
assert( len(m) == 2 ), 'expected len(m) == 2'
all_keys.append(('something else',)) # store for later
m[23] = 0
assert( len(m) == 3 ), 'expected len(m) == 3'
all_keys.append((23,)) # store for later
# check if it's possible to read this value back using either of keys
assert( m['aa'] == 123 ), 'expected m[\'aa\'] == 123'
assert( m[12] == 123 ), 'expected m[12] == 123'
assert( m[32] == 123 ), 'expected m[32] == 123'
assert( m['mmm'] == 123 ), 'expected m[\'mmm\'] == 123'
# now update value and again - confirm it back - using different keys..
m['aa'] = 45
assert( m['aa'] == 45 ), 'expected m[\'aa\'] == 45'
assert( m[12] == 45 ), 'expected m[12] == 45'
assert( m[32] == 45 ), 'expected m[32] == 45'
assert( m['mmm'] == 45 ), 'expected m[\'mmm\'] == 45'
m[12] = '4'
assert( m['aa'] == '4' ), 'expected m[\'aa\'] == \'4\''
assert( m[12] == '4' ), 'expected m[12] == \'4\''
# test __str__
m_str_exp = '{(23): 0, (\'aa\', \'mmm\', 32, 12): \'4\', (\'something else\'): \'abcd\'}'
m_str = str(m)
assert (len(m_str) > 0), 'str(m) should not be empty!'
assert (m_str[0] == '{'), 'str(m) should start with \'{\', but does with \'%c\'' % m_str[0]
assert (m_str[-1] == '}'), 'str(m) should end with \'}\', but does with \'%c\'' % m_str[-1]
# check if all key-values are there as expected. THey might be sorted differently
def get_values_from_str(dict_str):
sorted_keys_and_value = []
for k in dict_str.split(', ('):
keys, val = k.strip('{}() ').replace(')', '').split(':')
keys = sorted([k.strip() for k in keys.split(',')])
sorted_keys_and_value.append((keys, val))
return sorted_keys_and_value
exp = get_values_from_str(m_str_exp)
act = get_values_from_str(m_str)
assert (contains_all(act, exp)), 'str(m) values: \'{0}\' are not {1} '.format(act, exp)
# try accessing / creating new (keys)-> value mapping whilst one of these
# keys already maps to a value in this dictionarys
try:
m['aa', 'bb'] = 'something new'
assert(False), 'Should not allow adding multiple-keys when one of keys (\'aa\') already exists!'
except KeyError, err:
pass
# now check if we can get all possible keys (formed in a list of tuples
# each tuple containing all keys)
res = sorted([sorted(k) for k in m.keys()])
all_keys = sorted([sorted(k) for k in all_keys])
assert(contains_all(res, all_keys)), 'unexpected values from m.keys(), got:\n%s\n expected:\n%s)' %(res, all_keys)
# check default iteritems (which will unpack tupe with key(s) and value)
all_keys = [sorted(k) for k in all_keys]
num_of_elements = 0
for keys, value in m.iteritems():
num_of_elements += 1
assert(sorted(keys) in all_keys), 'm.iteritems(): unexpected keys: %s' % (keys)
assert(m[keys[0]] == value), 'm.iteritems(): unexpected value: %s (keys: %s)' % (value, keys)
assert(num_of_elements > 0), 'm.iteritems() returned generator that did not produce anything'
# test default iterkeys()
num_of_elements = 0
for keys in m.iterkeys():
num_of_elements += 1
assert(sorted(keys) in all_keys), 'm.iterkeys(): unexpected keys: %s' % (keys)
assert(num_of_elements > 0), 'm.iterkeys() returned generator that did not produce anything'
# test iterkeys(int, True): useful to get all info from the dictionary
# dictionary is iterated over the type specified, but all keys are returned.
num_of_elements = 0
for keys in m.iterkeys(int, True):
num_of_elements += 1
assert(sorted(keys) in all_keys), 'm.iterkeys(int, True): unexpected keys: %s' % (keys)
assert(num_of_elements > 0), 'm.iterkeys(int, True) returned generator that did not produce anything'
# test values for different types of keys()
values_for_int_keys = sorted([0, '4'])
assert (sorted(m.values(int)) == values_for_int_keys), 'm.values(int) are %s, but expected: %s.' % (sorted(m.values(int)),
values_for_int_keys)
values_for_str_keys = sorted(['4', 'abcd'])
assert (sorted(m.values(str)) == values_for_str_keys), 'm.values(str) are %s, but expected: %s.' % (sorted(m.values(str)),
values_for_str_keys)
current_values = sorted([0, '4', 'abcd']) # default (should give all values)
assert (sorted(m.values()) == current_values), 'm.values() are %s, but expected: %s.' % (sorted(m.values()),
current_values)
#test itervalues() (default) - should return all values. (Itervalues for other types are tested below)
vals = []
for value in m.itervalues():
vals.append(value)
assert (current_values == sorted(vals)), 'itervalues(): expected %s, but collected %s' % (current_values, sorted(vals))
#test items(int)
items_for_int = sorted([(32, '4'), (23, 0)])
assert (items_for_int == sorted(m.items(int))), 'items(int): expected %s, but collected %s' % (items_for_int,
sorted(m.items(int)))
# test items(str)
items_for_str = sorted([('aa', '4'), ('something else', 'abcd')])
assert (items_for_str == sorted(m.items(str))), 'items(str): expected %s, but collected %s' % (items_for_str,
sorted(m.items(str)))
# test items() (default - all items)
all_items = [((('aa', 'mmm', 32, 12), '4')), (('something else',), 'abcd'), ((23,), 0)]
all_items = sorted([sorted(k) for k in [sorted(kk) for kk in all_items]])
res = sorted([sorted(k) for k in m.items()])
assert (all_items == res), 'items() (all items): expected %s,\n\t\t\t\tbut collected %s' % (all_items, res)
# now test deletion..
curr_len = len(m)
del m[12]
assert( len(m) == curr_len - 1 ), 'expected len(m) == %d' % (curr_len - 1)
# try again
try:
del m['aa']
assert(False), 'cant remove again: item m[\'aa\'] should not exist!'
except KeyError, err:
pass
# try to access non-existing
try:
k = m['aa']
assert(False), 'removed item m[\'aa\'] should exist!'
except KeyError, err:
pass
# try to access non-existing with a different key
try:
k = m[12]
assert(False), 'removed item m[12] should exist!'
except KeyError, err:
pass
# prepare for other tests (also testing creation of new items)
tst_range = range(10, 40) + range(50, 70)
for i in tst_range:
m[i] = i # will create a dictionary, where keys are same as items
# test iteritems()
for key, value in m.iteritems(int):
assert(key == value), 'iteritems(int): expected %d, but received %d' % (key, value)
# test iterkeys()
num_of_elements = 0
curr_index_in_range = 0
for key in m.iterkeys(int):
expected = tst_range[curr_index_in_range]
assert (key == expected), 'iterkeys(int): expected %d, but received %d' % (expected, key)
curr_index_in_range += 1
num_of_elements += 1
assert(num_of_elements > 0), 'm.iteritems(int) returned generator that did not produce anything'
#test itervalues(int)
curr_index_in_range = 0
num_of_elements = 0
for value in m.itervalues(int):
expected = tst_range[curr_index_in_range]
assert (value == expected), 'itervalues(int): expected %d, but received %d' % (expected, value)
curr_index_in_range += 1
num_of_elements += 1
assert(num_of_elements > 0), 'm.itervalues(int) returned generator that did not produce anything'
# test values(int)
assert (m.values(int) == tst_range), 'm.values(int) is not as expected.'
# test keys()
assert (m.keys(int) == tst_range), 'm.keys(int) is not as expected.'
# test setitem with multiple keys
m['xy', 999, 'abcd'] = 'teststr'
try:
m['xy', 998] = 'otherstr'
assert(False), 'creating / updating m[\'xy\', 998] should fail!'
except KeyError, err:
pass
# test setitem with multiple keys
m['cd'] = 'somethingelse'
try:
m['cd', 999] = 'otherstr'
assert(False), 'creating / updating m[\'cd\', 999] should fail!'
except KeyError, err:
pass
m['xy', 999] = 'otherstr'
assert (m['xy'] == 'otherstr'), 'm[\'xy\'] is not as expected.'
assert (m[999] == 'otherstr'), 'm[999] is not as expected.'
assert (m['abcd'] == 'otherstr'), 'm[\'abcd\'] is not as expected.'
m['abcd', 'xy'] = 'another'
assert (m['xy'] == 'another'), 'm[\'xy\'] is not == \'another\'.'
assert (m[999] == 'another'), 'm[999] is not == \'another\''
assert (m['abcd'] == 'another'), 'm[\'abcd\'] is not == \'another\'.'
# test get functionality of basic dictionaries
m['CanIGet'] = 'yes'
assert (m.get('CanIGet') == 'yes')
assert (m.get('ICantGet') == None)
assert (m.get('ICantGet', "Ok") == "Ok")
k = multi_key_dict()
k['1:12', 1] = 'key_has_:'
k.items() # should not cause any problems to have : in key
assert (k[1] == 'key_has_:'), 'k[1] is not equal to \'abc:def:ghi\''
import datetime
n = datetime.datetime.now()
l = multi_key_dict()
l[n] = 'now' # use datetime obj as a key
#test keys..
r = l.keys()[0]
assert(r == (n,)), 'Expected {0} (tuple with all key types) as a 1st key, but got: {1}'.format((n,), r)
r = l.keys(datetime.datetime)[0]
assert(r == n), 'Expected {0} as a key, but got: {1}'.format(n, r)
assert(l.values() == ['now']), 'Expected values: {0}, but got: {1}'.format(l.values(), 'now')
# test items..
exp_items = [((n,), 'now')]
r = l.items()
assert(r == exp_items), 'Expected for items(): tuple of keys: {0}, but got: {1}'.format(r, exp_items)
assert(exp_items[0][1] == 'now'), 'Expected for items(): value: {0}, but got: {1}'.format('now',
exp_items[0][1])
print 'All test passed OK!'
if __name__ == '__main__':
try:
test_multi_key_dict()
except KeyboardInterrupt:
print '\n(interrupted by user)'
|
{
"content_hash": "a6f2c41bd860640f939b8f3bc9253763",
"timestamp": "",
"source": "github",
"line_count": 586,
"max_line_length": 141,
"avg_line_length": 47.525597269624576,
"alnum_prop": 0.5634829443447038,
"repo_name": "JFDesigner/MTG",
"id": "489ac2b8f8a7db6284db2c54638f3a430d247a81",
"size": "27850",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/mayaSnippet/multi_key_dict.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "152980"
}
],
"symlink_target": ""
}
|
import functools
import errno
import os
import resource
import signal
import time
import subprocess
import re
from swift import gettext_ as _
from swift.common.utils import search_tree, remove_file, write_file
SWIFT_DIR = '/etc/swift'
RUN_DIR = '/var/run/swift'
# auth-server has been removed from ALL_SERVERS, start it explicitly
ALL_SERVERS = ['account-auditor', 'account-server', 'container-auditor',
'container-replicator', 'container-server', 'container-sync',
'container-updater', 'object-auditor', 'object-server',
'object-expirer', 'object-replicator', 'object-updater',
'proxy-server', 'account-replicator', 'account-reaper']
MAIN_SERVERS = ['proxy-server', 'account-server', 'container-server',
'object-server']
REST_SERVERS = [s for s in ALL_SERVERS if s not in MAIN_SERVERS]
GRACEFUL_SHUTDOWN_SERVERS = MAIN_SERVERS + ['auth-server']
START_ONCE_SERVERS = REST_SERVERS
# These are servers that match a type (account-*, container-*, object-*) but
# don't use that type-server.conf file and instead use their own.
STANDALONE_SERVERS = ['object-expirer']
KILL_WAIT = 15 # seconds to wait for servers to die (by default)
WARNING_WAIT = 3 # seconds to wait after message that may just be a warning
MAX_DESCRIPTORS = 32768
MAX_MEMORY = (1024 * 1024 * 1024) * 2 # 2 GB
MAX_PROCS = 8192 # workers * disks * threads_per_disk, can get high
def setup_env():
"""Try to increase resource limits of the OS. Move PYTHON_EGG_CACHE to /tmp
"""
try:
resource.setrlimit(resource.RLIMIT_NOFILE,
(MAX_DESCRIPTORS, MAX_DESCRIPTORS))
except ValueError:
print _("WARNING: Unable to modify file descriptor limit. "
"Running as non-root?")
try:
resource.setrlimit(resource.RLIMIT_DATA,
(MAX_MEMORY, MAX_MEMORY))
except ValueError:
print _("WARNING: Unable to modify memory limit. "
"Running as non-root?")
try:
resource.setrlimit(resource.RLIMIT_NPROC,
(MAX_PROCS, MAX_PROCS))
except ValueError:
print _("WARNING: Unable to modify max process limit. "
"Running as non-root?")
# Set PYTHON_EGG_CACHE if it isn't already set
os.environ.setdefault('PYTHON_EGG_CACHE', '/tmp')
def command(func):
"""
Decorator to declare which methods are accessible as commands, commands
always return 1 or 0, where 0 should indicate success.
:param func: function to make public
"""
func.publicly_accessible = True
@functools.wraps(func)
def wrapped(*a, **kw):
rv = func(*a, **kw)
return 1 if rv else 0
return wrapped
def watch_server_pids(server_pids, interval=1, **kwargs):
"""Monitor a collection of server pids yielding back those pids that
aren't responding to signals.
:param server_pids: a dict, lists of pids [int,...] keyed on
Server objects
"""
status = {}
start = time.time()
end = start + interval
server_pids = dict(server_pids) # make a copy
while True:
for server, pids in server_pids.items():
for pid in pids:
try:
# let pid stop if it wants to
os.waitpid(pid, os.WNOHANG)
except OSError as e:
if e.errno not in (errno.ECHILD, errno.ESRCH):
raise # else no such child/process
# check running pids for server
status[server] = server.get_running_pids(**kwargs)
for pid in pids:
# original pids no longer in running pids!
if pid not in status[server]:
yield server, pid
# update active pids list using running_pids
server_pids[server] = status[server]
if not [p for server, pids in status.items() for p in pids]:
# no more running pids
break
if time.time() > end:
break
else:
time.sleep(0.1)
class UnknownCommandError(Exception):
pass
class Manager():
"""Main class for performing commands on groups of servers.
:param servers: list of server names as strings
"""
def __init__(self, servers, run_dir=RUN_DIR):
server_names = set()
for server in servers:
if server == 'all':
server_names.update(ALL_SERVERS)
elif server == 'main':
server_names.update(MAIN_SERVERS)
elif server == 'rest':
server_names.update(REST_SERVERS)
elif '*' in server:
# convert glob to regex
server_names.update([s for s in ALL_SERVERS if
re.match(server.replace('*', '.*'), s)])
else:
server_names.add(server)
self.servers = set()
for name in server_names:
self.servers.add(Server(name, run_dir))
@command
def status(self, **kwargs):
"""display status of tracked pids for server
"""
status = 0
for server in self.servers:
status += server.status(**kwargs)
return status
@command
def start(self, **kwargs):
"""starts a server
"""
setup_env()
status = 0
for server in self.servers:
server.launch(**kwargs)
if not kwargs.get('daemon', True):
for server in self.servers:
try:
status += server.interact(**kwargs)
except KeyboardInterrupt:
print _('\nuser quit')
self.stop(**kwargs)
break
elif kwargs.get('wait', True):
for server in self.servers:
status += server.wait(**kwargs)
return status
@command
def no_wait(self, **kwargs):
"""spawn server and return immediately
"""
kwargs['wait'] = False
return self.start(**kwargs)
@command
def no_daemon(self, **kwargs):
"""start a server interactively
"""
kwargs['daemon'] = False
return self.start(**kwargs)
@command
def once(self, **kwargs):
"""start server and run one pass on supporting daemons
"""
kwargs['once'] = True
return self.start(**kwargs)
@command
def stop(self, **kwargs):
"""stops a server
"""
server_pids = {}
for server in self.servers:
signaled_pids = server.stop(**kwargs)
if not signaled_pids:
print _('No %s running') % server
else:
server_pids[server] = signaled_pids
# all signaled_pids, i.e. list(itertools.chain(*server_pids.values()))
signaled_pids = [p for server, pids in server_pids.items()
for p in pids]
# keep track of the pids yeiled back as killed for all servers
killed_pids = set()
kill_wait = kwargs.get('kill_wait', KILL_WAIT)
for server, killed_pid in watch_server_pids(server_pids,
interval=kill_wait,
**kwargs):
print _("%s (%s) appears to have stopped") % (server, killed_pid)
killed_pids.add(killed_pid)
if not killed_pids.symmetric_difference(signaled_pids):
# all proccesses have been stopped
return 0
# reached interval n watch_pids w/o killing all servers
for server, pids in server_pids.items():
if not killed_pids.issuperset(pids):
# some pids of this server were not killed
print _('Waited %s seconds for %s to die; giving up') % (
kill_wait, server)
return 1
@command
def shutdown(self, **kwargs):
"""allow current requests to finish on supporting servers
"""
kwargs['graceful'] = True
status = 0
status += self.stop(**kwargs)
return status
@command
def restart(self, **kwargs):
"""stops then restarts server
"""
status = 0
status += self.stop(**kwargs)
status += self.start(**kwargs)
return status
@command
def reload(self, **kwargs):
"""graceful shutdown then restart on supporting servers
"""
kwargs['graceful'] = True
status = 0
for server in self.servers:
m = Manager([server.server])
status += m.stop(**kwargs)
status += m.start(**kwargs)
return status
@command
def force_reload(self, **kwargs):
"""alias for reload
"""
return self.reload(**kwargs)
def get_command(self, cmd):
"""Find and return the decorated method named like cmd
:param cmd: the command to get, a string, if not found raises
UnknownCommandError
"""
cmd = cmd.lower().replace('-', '_')
try:
f = getattr(self, cmd)
except AttributeError:
raise UnknownCommandError(cmd)
if not hasattr(f, 'publicly_accessible'):
raise UnknownCommandError(cmd)
return f
@classmethod
def list_commands(cls):
"""Get all publicly accessible commands
:returns: a list of string tuples (cmd, help), the method names who are
decorated as commands
"""
get_method = lambda cmd: getattr(cls, cmd)
return sorted([(x.replace('_', '-'), get_method(x).__doc__.strip())
for x in dir(cls) if
getattr(get_method(x), 'publicly_accessible', False)])
def run_command(self, cmd, **kwargs):
"""Find the named command and run it
:param cmd: the command name to run
"""
f = self.get_command(cmd)
return f(**kwargs)
class Server():
"""Manage operations on a server or group of servers of similar type
:param server: name of server
"""
def __init__(self, server, run_dir=RUN_DIR):
if '-' not in server:
server = '%s-server' % server
self.server = server.lower()
self.type = server.rsplit('-', 1)[0]
self.cmd = 'swift-%s' % server
self.procs = []
self.run_dir = run_dir
def __str__(self):
return self.server
def __repr__(self):
return "%s(%s)" % (self.__class__.__name__, repr(str(self)))
def __hash__(self):
return hash(str(self))
def __eq__(self, other):
try:
return self.server == other.server
except AttributeError:
return False
def get_pid_file_name(self, conf_file):
"""Translate conf_file to a corresponding pid_file
:param conf_file: an conf_file for this server, a string
:returns: the pid_file for this conf_file
"""
return conf_file.replace(
os.path.normpath(SWIFT_DIR), self.run_dir, 1).replace(
'%s-server' % self.type, self.server, 1).replace(
'.conf', '.pid', 1)
def get_conf_file_name(self, pid_file):
"""Translate pid_file to a corresponding conf_file
:param pid_file: a pid_file for this server, a string
:returns: the conf_file for this pid_file
"""
if self.server in STANDALONE_SERVERS:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
'.pid', '.conf', 1)
else:
return pid_file.replace(
os.path.normpath(self.run_dir), SWIFT_DIR, 1).replace(
self.server, '%s-server' % self.type, 1).replace(
'.pid', '.conf', 1)
def conf_files(self, **kwargs):
"""Get conf files for this server
:param: number, if supplied will only lookup the nth server
:returns: list of conf files
"""
if self.server in STANDALONE_SERVERS:
found_conf_files = search_tree(SWIFT_DIR, self.server + '*',
'.conf', dir_ext='.conf.d')
else:
found_conf_files = search_tree(SWIFT_DIR, '%s-server*' % self.type,
'.conf', dir_ext='.conf.d')
number = kwargs.get('number')
if number:
try:
conf_files = [found_conf_files[number - 1]]
except IndexError:
conf_files = []
else:
conf_files = found_conf_files
if not conf_files:
# maybe there's a config file(s) out there, but I couldn't find it!
if not kwargs.get('quiet'):
print _('Unable to locate config %sfor %s') % (
('number %s ' % number if number else ''), self.server)
if kwargs.get('verbose') and not kwargs.get('quiet'):
if found_conf_files:
print _('Found configs:')
for i, conf_file in enumerate(found_conf_files):
print ' %d) %s' % (i + 1, conf_file)
return conf_files
def pid_files(self, **kwargs):
"""Get pid files for this server
:param: number, if supplied will only lookup the nth server
:returns: list of pid files
"""
pid_files = search_tree(self.run_dir, '%s*' % self.server)
if kwargs.get('number', 0):
conf_files = self.conf_files(**kwargs)
# filter pid_files to match the index of numbered conf_file
pid_files = [pid_file for pid_file in pid_files if
self.get_conf_file_name(pid_file) in conf_files]
return pid_files
def iter_pid_files(self, **kwargs):
"""Generator, yields (pid_file, pids)
"""
for pid_file in self.pid_files(**kwargs):
yield pid_file, int(open(pid_file).read().strip())
def signal_pids(self, sig, **kwargs):
"""Send a signal to pids for this server
:param sig: signal to send
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
pids = {}
for pid_file, pid in self.iter_pid_files(**kwargs):
try:
if sig != signal.SIG_DFL:
print _('Signal %s pid: %s signal: %s') % (self.server,
pid, sig)
os.kill(pid, sig)
except OSError as e:
if e.errno == errno.ESRCH:
# pid does not exist
if kwargs.get('verbose'):
print _("Removing stale pid file %s") % pid_file
remove_file(pid_file)
elif e.errno == errno.EPERM:
print _("No permission to signal PID %d") % pid
else:
# process exists
pids[pid] = pid_file
return pids
def get_running_pids(self, **kwargs):
"""Get running pids
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.signal_pids(signal.SIG_DFL, **kwargs) # send noop
def kill_running_pids(self, **kwargs):
"""Kill running pids
:param graceful: if True, attempt SIGHUP on supporting servers
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
graceful = kwargs.get('graceful')
if graceful and self.server in GRACEFUL_SHUTDOWN_SERVERS:
sig = signal.SIGHUP
else:
sig = signal.SIGTERM
return self.signal_pids(sig, **kwargs)
def status(self, pids=None, **kwargs):
"""Display status of server
:param: pids, if not supplied pids will be populated automatically
:param: number, if supplied will only lookup the nth server
:returns: 1 if server is not running, 0 otherwise
"""
if pids is None:
pids = self.get_running_pids(**kwargs)
if not pids:
number = kwargs.get('number', 0)
if number:
kwargs['quiet'] = True
conf_files = self.conf_files(**kwargs)
if conf_files:
print _("%s #%d not running (%s)") % (self.server, number,
conf_files[0])
else:
print _("No %s running") % self.server
return 1
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
print _("%s running (%s - %s)") % (self.server, pid, conf_file)
return 0
def spawn(self, conf_file, once=False, wait=True, daemon=True, **kwargs):
"""Launch a subprocess for this server.
:param conf_file: path to conf_file to use as first arg
:param once: boolean, add once argument to command
:param wait: boolean, if true capture stdout with a pipe
:param daemon: boolean, if true ask server to log to console
:returns : the pid of the spawned process
"""
args = [self.cmd, conf_file]
if once:
args.append('once')
if not daemon:
# ask the server to log to console
args.append('verbose')
# figure out what we're going to do with stdio
if not daemon:
# do nothing, this process is open until the spawns close anyway
re_out = None
re_err = None
else:
re_err = subprocess.STDOUT
if wait:
# we're going to need to block on this...
re_out = subprocess.PIPE
else:
re_out = open(os.devnull, 'w+b')
proc = subprocess.Popen(args, stdout=re_out, stderr=re_err)
pid_file = self.get_pid_file_name(conf_file)
write_file(pid_file, proc.pid)
self.procs.append(proc)
return proc.pid
def wait(self, **kwargs):
"""
wait on spawned procs to start
"""
status = 0
for proc in self.procs:
# wait for process to close its stdout
output = proc.stdout.read()
if output:
print output
start = time.time()
# wait for process to die (output may just be a warning)
while time.time() - start < WARNING_WAIT:
time.sleep(0.1)
if proc.poll() is not None:
status += proc.returncode
break
return status
def interact(self, **kwargs):
"""
wait on spawned procs to terminate
"""
status = 0
for proc in self.procs:
# wait for process to terminate
proc.communicate()
if proc.returncode:
status += 1
return status
def launch(self, **kwargs):
"""
Collect conf files and attempt to spawn the processes for this server
"""
conf_files = self.conf_files(**kwargs)
if not conf_files:
return []
pids = self.get_running_pids(**kwargs)
already_started = False
for pid, pid_file in pids.items():
conf_file = self.get_conf_file_name(pid_file)
# for legacy compat you can't start other servers if one server is
# already running (unless -n specifies which one you want), this
# restriction could potentially be lifted, and launch could start
# any unstarted instances
if conf_file in conf_files:
already_started = True
print _("%s running (%s - %s)") % (self.server, pid, conf_file)
elif not kwargs.get('number', 0):
already_started = True
print _("%s running (%s - %s)") % (self.server, pid, pid_file)
if already_started:
print _("%s already started...") % self.server
return []
if self.server not in START_ONCE_SERVERS:
kwargs['once'] = False
pids = {}
for conf_file in conf_files:
if kwargs.get('once'):
msg = _('Running %s once') % self.server
else:
msg = _('Starting %s') % self.server
print '%s...(%s)' % (msg, conf_file)
try:
pid = self.spawn(conf_file, **kwargs)
except OSError as e:
if e.errno == errno.ENOENT:
#TODO(clayg): should I check if self.cmd exists earlier?
print _("%s does not exist") % self.cmd
break
else:
raise
pids[pid] = conf_file
return pids
def stop(self, **kwargs):
"""Send stop signals to pids for this server
:returns: a dict mapping pids (ints) to pid_files (paths)
"""
return self.kill_running_pids(**kwargs)
|
{
"content_hash": "e3f351e5b74ea6b71be4d09fa5ab88f8",
"timestamp": "",
"source": "github",
"line_count": 628,
"max_line_length": 79,
"avg_line_length": 33.85031847133758,
"alnum_prop": 0.5316586696772979,
"repo_name": "xiaoguoai/ec-dev-swift",
"id": "f345e28fec9abc760e3aedb1a906abee6b544b0a",
"size": "21853",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "swift/common/manager.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "15048"
},
{
"name": "Python",
"bytes": "3816353"
},
{
"name": "Shell",
"bytes": "2933"
}
],
"symlink_target": ""
}
|
f = CurrentFont()
g = CurrentGlyph()
layer = Glyphs.font.selectedLayers[0] # current layer
def draw_circle(xxx_todo_changeme, diameter):
(origin_x, origin_y) = xxx_todo_changeme
pen = g.getPen()
d = diameter #diameter
r = d / 2 #radius
h = r * 0.55229 #handle size
x0, y0 = origin_x, origin_y #origin
#since the pen starts at the first oncurve point, it needs to add a radius value to 'correct' the initial point
x_init = r + origin_x
y_init = origin_y - r
# oncurve points
x1, y1 = x_init, y_init
x2, y2 = x1 + r, y1 + r
x3, y3 = x1, y2 + r
x4, y4 = x3 - r, y2
pen.moveTo((x1, y1))
pen.curveTo((x1+h,y1), (x2,y2-h), (x2,y2))
pen.curveTo((x2,y2+h), (x3+h,y3), (x3,y3))
pen.curveTo((x3-h,y3), (x4,y4+h), (x4,y4))
pen.curveTo((x4,y4-h), (x1-h,y1), (x1,y1))
pen.closePath()
g.update()
# show all intersections with glyph at y=100
# intersections = layer.intersectionsBetweenPoints((-1000, 100), (layer.width+1000, 100))
# print(intersections)
#
# # left sidebearing at measurement line
# print(intersections[1].x)
#
# # right sidebearing at measurement line
# print(layer.width - intersections[-2].x)
# find the the x-height's middle point
font = Glyphs.font
selected_master = font.selectedFontMaster
x = selected_master.xHeight
half_x = x/2
# show all intersections with the glyph at y = half_x
intersections = layer.intersectionsBetweenPoints((-1000, half_x), (layer.width+1000, half_x))
# print len(intersections)
# ip = intersections[1].x, intersections[1].y
# sz = intersections[2].x - intersections[1].x
li = [] # list of intersections (excluding the lsb and rsb)
for i in range(len(intersections)):
if i == 0 or i == len(intersections)-1:
continue
else:
li.append((intersections[i].x, intersections[i].y))
new_list = zip(li,li[1:])[::2]
for v in range(len(new_list)):
p1 = new_list[v][0][0]
p2 = new_list[v][1][0] - p1
draw_circle(new_list[v][0], p2)
|
{
"content_hash": "53a63adf6b8058be78e9eaf3c340c03f",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 112,
"avg_line_length": 25.89189189189189,
"alnum_prop": 0.6649269311064718,
"repo_name": "filipenegrao/glyphsapp-scripts",
"id": "ab517c57b54723484324091a392b522484c553d9",
"size": "1916",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "in_progress/draw_circle_in_strokes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "235"
},
{
"name": "Python",
"bytes": "135574"
}
],
"symlink_target": ""
}
|
from flask import (
g,
Blueprint,
render_template,
current_app,
request,
url_for,
redirect,
session,
)
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
import packtools
from .forms import XMLUploadForm
from .utils import analyze_xml
main = Blueprint("main", __name__)
@main.before_app_request
def add_context_settings():
setattr(
g,
"SETTINGS_MAX_UPLOAD_SIZE",
current_app.config.get("SETTINGS_MAX_UPLOAD_SIZE"),
)
setattr(g, "PACKTOOLS_VERSION", current_app.config.get("PACKTOOLS_VERSION"))
@main.route("/", defaults={"path_file": ""})
@main.route("/<path:path_file>")
def packtools_home(path_file):
if not path_file:
return redirect(url_for("main.packtools_stylechecker"))
url_session = session.get("url_static_file", "http://localhost")
return redirect(urljoin(url_session, path_file))
@main.route("/stylechecker", methods=["GET", "POST"])
def packtools_stylechecker():
form = XMLUploadForm()
context = dict(form=form)
if form.validate_on_submit():
if form.add_scielo_br_rules.data:
extra_sch = packtools.catalogs.SCHEMAS["scielo-br"]
else:
extra_sch = None
results, exc = analyze_xml(form.file.data, extra_schematron=extra_sch)
context["results"] = results
context["xml_exception"] = (
exc and getattr(exc, "message", getattr(exc, "msg", str(exc))) or None
)
return render_template("validator/stylechecker.html", **context)
@main.route("/previews", methods=["GET", "POST"])
def packtools_preview_html():
form = XMLUploadForm()
context = dict(form=form)
if form.validate_on_submit():
session["url_static_file"] = form.url_static_file.data
previews = []
try:
for lang, html_output in packtools.HTMLGenerator.parse(
form.file.data,
valid_only=False,
css=url_for("static", filename="css/htmlgenerator/scielo-article.css"),
print_css=url_for(
"static", filename="css/htmlgenerator/scielo-bundle-print.css"
),
js=url_for("static", filename="js/htmlgenerator/scielo-article-min.js"),
):
previews.append({"lang": lang, "html": html_output})
except Exception as e:
# print(e.message)
# qualquer exeção aborta a pre-visualização mas continua com o resto
previews = []
context["previews"] = previews
return render_template("validator/preview_html.html", **context)
|
{
"content_hash": "ac2abcaf3a06bdf3f9a127c36732921c",
"timestamp": "",
"source": "github",
"line_count": 93,
"max_line_length": 88,
"avg_line_length": 28.612903225806452,
"alnum_prop": 0.6125516723036453,
"repo_name": "gustavofonseca/packtools",
"id": "36fd1e784a676057aa08a92c861321e2498c7f87",
"size": "2681",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packtools/webapp/views.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "204"
},
{
"name": "Python",
"bytes": "2055715"
},
{
"name": "Shell",
"bytes": "1508"
},
{
"name": "XSLT",
"bytes": "228241"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
__version__ = version = '0.6.0'
import sys
from .core import httpretty, httprettified
from .errors import HTTPrettyError
from .core import URIInfo
HTTPretty = httpretty
activate = httprettified
SELF = sys.modules[__name__]
for attr in [name.decode() for name in httpretty.METHODS] + ['register_uri', 'enable', 'disable', 'is_enabled', 'Response']:
setattr(SELF, attr, getattr(httpretty, attr))
|
{
"content_hash": "d3e843710a882fd5cbb40234ca84e284",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 124,
"avg_line_length": 26.11764705882353,
"alnum_prop": 0.722972972972973,
"repo_name": "maxmind/HTTPretty",
"id": "d0e88a993331369dad2560f905890ac5b6843b34",
"size": "1669",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "httpretty/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "107692"
}
],
"symlink_target": ""
}
|
import hTools2.dialogs.glyphs.anchors_transfer
reload(hTools2.dialogs.glyphs.anchors_transfer)
hTools2.dialogs.glyphs.anchors_transfer.transferAnchorsDialog()
|
{
"content_hash": "991b86f1f7323d047699066b4cf156f7",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 63,
"avg_line_length": 40,
"alnum_prop": 0.8625,
"repo_name": "gferreira/hTools2_extension",
"id": "ebafdc746b9c7d4e183a775c06c1a52912338ab2",
"size": "191",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hTools2.roboFontExt/lib/Scripts/selected glyphs/anchors/transfer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18852"
},
{
"name": "HTML",
"bytes": "1477535"
},
{
"name": "JavaScript",
"bytes": "98858"
},
{
"name": "Python",
"bytes": "686182"
}
],
"symlink_target": ""
}
|
"""
Code to sum characteristics of events split by either time or speed and write
out the results to a table.
Author: Rachel H White rhwhite@uw.edu
Created: Oct 2016
Example use:
python CalculateDomainAverages.py --Data TRMM --Version Standard \
--anstartyr 1998 --anendyr 2014 \
--tbound1 0 1 2 5 --tbound2 1 2 5 100 --splittype day \
--unit day --minlat -40 --maxlat 40
"""
import os, errno
import numpy as np
import netCDF4
from netCDF4 import Dataset
import datetime as dt
import re
import sys
import Ngl
import xray
import math
import resource
import argparse
from rhwhitepackages.readwrite import getunitsdesc
from rhwhitepackages.readwrite import xrayOpen
from rhwhitepackages.readwrite import getdirectory
parser = argparse.ArgumentParser(description="Calculate domain averages")
parser.add_argument('--minlat',type=int,nargs='?',default=-45,help='min lat')
parser.add_argument('--maxlat',type=int,nargs='?',default=45,help='max lat')
parser.add_argument('--minlon',type=int,nargs='?',default=0,help='min lon')
parser.add_argument('--maxlon',type=int,nargs='?',default=360,help='max lon')
parser.add_argument('--splittype',metavar='splittype',type=str,nargs=1,
help='the type of split you want, day, speed, or maxspeed')
parser.add_argument('--speedtspan',metavar='speedtspan',type=int,nargs='?',default=4,
help='how many time spans does the speed average cover?')
parser.add_argument('--tbound1',metavar='tbound1',type=float,nargs='+',
help='lower bounds')
parser.add_argument('--tbound2',metavar='tbound2',type=float,nargs="+",
help='upper bounds')
parser.add_argument('--unit',type=str,nargs=1,help='units of split type')
parser.add_argument('--Data',type=str,nargs=1,
help='type of Data, TRMM, ERAI, or CESM')
parser.add_argument('--Version',type=str,nargs=1,
help='Version of Data, Standard, low, 6th_from6 etc')
parser.add_argument('--anstartyr',type=int,nargs=1,
help='start year for analysis')
parser.add_argument('--anendyr',type=int,nargs=1,help='end year for analysis')
parser.add_argument('--test',type=int,nargs='?',default=0,help='1 for test')
parser.add_argument('--extra',type=int,nargs='?',default=0,help='1 for test')
parser.add_argument('--tperday',type=int,nargs='?',default=8,help=
'timesteps per day, default is 3hourly = 8')
parser.add_argument('--minGB',type=int,nargs=1,default=0,help='min gridboxes')
args = parser.parse_args()
print(str(args))
print(str(dt.date.today()))
# put inputs into the type and variable names we want
splittype = args.splittype[0]
speedtspan = args.speedtspan
# multiply tbound in by 24 to get hours rather than days
tbound1 = np.multiply(args.tbound1,24.0)
tbound2 = np.multiply(args.tbound2,24.0)
unit = args.unit[0]
Data = args.Data[0]
Version = args.Version[0]
anstartyr = args.anstartyr[0]
anendyr = args.anendyr[0]
minlon = args.minlon
maxlon = args.maxlon
minlat = args.minlat
maxlat = args.maxlat
test = args.test
extra = args.extra
minGB = args.minGB[0]
diradd = getdirectory(splittype)
nbounds = len(tbound1)
print(tbound1)
print minGB
print str(minGB)
R = 6371000 # radius of Earth in m
nyears = anendyr - anstartyr + 1
minevent = 100000
if Data == "TRMM":
FileInLats = ('/home/disk/eos4/rachel/Obs/TRMM/'
'SeasAnn_TRMM_1998-2014_3B42_3hrly_nonan.nc')
Fstartyr = 1998
Fendyr = 2014
elif Data == "TRMMERAIgd":
FileInLats = ('/home/disk/eos4/rachel/Obs/TRMM/'
'regrid2ERAI_TRMM_3B42_1998-2014.nc')
Fstartyr = 1998
Fendyr = 2014
elif Data == "ERAI":
FileInLats = ('/home/disk/eos4/rachel/Obs/ERAI/3hrly/Precip_3hrly/'
'SeasAnn_ERAI_Totalprecip_1980-2014_preprocess.nc')
Fstartyr = 1980
Fendyr = 2014
elif Data == "ERA20C":
FileInLats = '/home/disk/eos4/rachel/Obs/ERA_20C/ERA_20C_LatLon.nc'
elif Data == "CESM":
FileInLats = ('/home/disk/eos4/rachel/EventTracking/Inputs/CESM/'
'f.e13.FAMPIC5.ne120_ne120.1979_2012.001/'
'f.e13.FAMIPC5.ne120_ne120_TotalPrecip_1979-2012.nc')
else:
print("unexpected data type")
exit()
DirI = ('/home/disk/eos4/rachel/EventTracking/FiT_RW_ERA/' + Data + '_output/'
+ Version + str(Fstartyr) + '/proc/')
DirO = DirI + diradd + '/'
# Work out how many to skip at beginning and end of file
filedate = dt.date(Fstartyr,1,1)
startdate = dt.date(anstartyr,1,1)
enddate = dt.date(anendyr+1,1,1)
if Fstartyr == anstartyr:
starttstart = 0
elif Fstartyr > anstartyr:
print("Fstartyr is ", Fstartyr, " and anstartyr is ",anstartyr)
exit("cannot start analysing data before there is data!")
elif Fstartyr < anstartyr:
diffdays = (startdate-filedate).days
starttstart = diffdays * args.tperday
endtstart = starttstart + (enddate - startdate).days * args.tperday
# In[4]:
#Get lons and lats
FileIn = xrayOpen(FileInLats)
if Data == "CESM":
lats = FileIn['lat'].values
lons = FileIn['lon'].values
elif Data in ["ERA20C","TRMMERAIgd"]:
lats = FileIn['latitude'].values
lons = FileIn['longitude'].values
else:
lats = FileIn['Latitude'].values
lons = FileIn['Longitude'].values
nlats = len(lats)
nlons = len(lons)
# initialize data
averageydist = np.zeros([nbounds],float)
averagexdist = np.zeros([nbounds],float)
averageprecipperhr = np.zeros([nbounds],float)
averageprecipperareahr = np.zeros([nbounds],float)
averagetime = np.zeros([nbounds],float)
averagem2 = np.zeros([nbounds],float)
averagegridboxes = np.zeros([nbounds],float)
precipvolume = np.zeros([nbounds],float)
count = np.zeros([nbounds],int)
# In[5]:
# open main dataset and read in data
FileI1 = ('All_Precip_' + str(Fstartyr) + '-' + str(Fendyr) + '_' + Data + '_' +
Version + '.nc')
datain = xrayOpen(DirI + FileI1,decodetimes=False)
nevents = len(datain.events)
ycenterstart = datain.ycenterstart[0:nevents].values
xcenterstart = datain.xcenterstart[0:nevents].values
ycenterend = datain.ycenterend[0:nevents].values
xcenterend = datain.xcenterend[0:nevents].values
ycentermean = datain.ycentermean[0:nevents].values
xcentermean = datain.xcentermean[0:nevents].values
tstart = datain.tstart[0:nevents].values
timespan = datain.timespan[0:nevents].values
totalprecip = datain.totalprecip[0:nevents].values
totalprecipSA = datain.totalprecipSA[0:nevents].values
gridboxspan = datain.gridboxspan[0:nevents].values
gridboxspanSA = datain.gridboxspanSA[0:nevents].values
unigridboxspan = datain.uniquegridboxspan[0:nevents].values
# In[6]:
# Set fileminlat and filemaxlat if we ran FiT on a subsection of the data
fileminlat = -90
filemaxlat = 90
# In[ ]:
def isinregion(ilat,ilon):
if ilat < minlat or ilat > maxlat:
return(False)
else:
if checklons:
if ilon < minlon or ilon > maxlon:
return False
else:
return(True)
# In[ ]:
checklons = True
if minlon == 0 and maxlon == 360:
checklons = False
elif minlon ==-180 and maxlon == 180:
checklons = False
if test == 1:
nevents = 10000
filenameadd = "test_"
else:
nevents = len(datain.events)
filenameadd = ""
for ievent in range(0,nevents):
if (ievent % 100000 == 0):
print "ievent: " + str(ievent)
# check is in timeframe of analysis
# assuming that tstart = 0 is equal to Fstartyr
if unigridboxspan[ievent] > minGB: # if unique
# gridboxes exceed specified threshold
if tstart[ievent] >= starttstart and tstart[ievent] <= endtstart:
# check if in region
if isinregion(lats[ycenterstart[ievent]],
lons[xcenterstart[ievent]]):
if isinregion(lats[ycenterend[ievent]],
lons[xcenterend[ievent]]):
for ibound in range(0,nbounds):
if timespan[ievent] < tbound2[ibound]:
if extra == 1:
averageydist[ibound] += (lats[ycenterend[ievent]] -
lats[ycenterstart[ievent]])
averagexdist[ibound] += (lons[xcenterend[ievent]] -
lons[xcenterstart[ievent]])
# if negative then that's fine for NH, positive is fine
# for southern hemisphere, so get the average event
# distance travelled
averagetime[ibound] += timespan[ievent]
averageprecipperhr[ibound] += (
totalprecip[ievent]/timespan[ievent])
# Include factor of 3 to convert to hours, not timesteps
averageprecipperareahr[ibound] += (
totalprecip[ievent]/(3.0 *
gridboxspan[ievent]))
# Include factor of 3 because timespan is in hours, not timesteps
averagem2[ibound] += ( gridboxspanSA[ievent] * 3.0 /
timespan[ievent])
# Include factor of 3 because timespan is in hours, not
# timesteps
averagegridboxes[ibound] += ( gridboxspan[ievent] * 3.0 /
timespan[ievent])
precipvolume[ibound] += totalprecipSA[ievent]
count[ibound] += 1
break
# In[ ]:
if extra == 1:
averageydist = averageydist / count
averagexdist = averagexdist / count
averagetime = averagetime / count
averageprecipperhr = averageprecipperhr / count
averageprecipperareahr = averageprecipperareahr / count
averagem2 = averagem2/count
averagegridboxes = averagegridboxes/count
# convert from m2 to km2
averagekm2 = averagem2 / (1000.0 * 1000.0)
# In[ ]:
# Write out to a text file
filename = (filenameadd + 'Averages_' + '{:d}'.format(minlat) + 'N-' +
'{:d}'.format(maxlat) + 'N_min' + str(minGB) + 'GB.txt')
with open(DirI + filename, 'w') as text_file:
text_file.write('Domain averages for ' + '{:d}'.format(minlat) + 'N-' +
'{:d}'.format(maxlat) + 'N and ' + '{:d}'.format(minlon) + 'E-'
+ '{:d}'.format(maxlon) + 'E \n')
text_file.write('Created by CalculateDomainTotalAverages.py on ' +
str(dt.date.today()) + '\n')
text_file.write('Input Arguments: \n')
text_file.write(str(args) + '\n')
if extra == 1:
text_file.write('timespan (hours), \t count (events/yr), \t average '
'latitude distance (degs), \t average longitude distance '
'(degrees) \t averagepreciphr (mm/hr), \t averagepreciphr '
'(mm/gridbox/hr) \t total precip (m3 /yr) \n')
for ibound in range(0,nbounds):
text_file.write('{:.1f}'.format(tbound1[ibound]) + '-' +
'{:.1f}'.format(tbound2[ibound]) + 'hours, ' +
'{:.2e}'.format(count[ibound]/nyears) +
'; ' +
'{:.2f}'.format(averageydist[ibound]) +
'; ' +
'{:.2f}'.format(averagexdist[ibound]) +
'; ' +
'{:.2f}'.format(averagetime[ibound]) +
'; ' +
'{:.2e}'.format(averagekm2[ibound]) +
'; ' +
'{:.2e}'.format(averageprecipperhr[ibound]) +
'; ' +
'{:.2e}'.format(averageprecipperareahr[ibound])
+ '; ' +
'{:.2e}'.format(precipvolume[ibound]/nyears)
+ ' \n')
else:
text_file.write('timespan (hours), \t count (events/yr), \t average '
'time, \t average footprint (km2) '
'\t average footprint (gridboxes) '
'\t averagepreciphr (gridboxes mm/hr), \taveragepreciphr '
'(mm/hr) \t total precip (m3 /yr) \n')
for ibound in range(0,nbounds):
text_file.write('{:.1f}'.format(tbound1[ibound]) + '-' +
'{:.1f}'.format(tbound2[ibound]) + 'hours; \t' +
'{:.2e}'.format(count[ibound]/nyears) +
'; \t' +
'{:.2f}'.format(averagetime[ibound]) +
'; \t' +
'{:.2e}'.format(averagekm2[ibound]) +
'; \t' +
'{:.2e}'.format(averagegridboxes[ibound]) +
'; \t' +
'{:.2e}'.format(averageprecipperhr[ibound]) +
'; \t' +
'{:.2e}'.format(averageprecipperareahr[ibound])
+ '; \t ' +
'{:.2e}'.format(precipvolume[ibound]/nyears)
+ ' \n')
# In[ ]:
datain.close()
|
{
"content_hash": "222df6818ca00987913a723a2b693716",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 97,
"avg_line_length": 35.97855227882037,
"alnum_prop": 0.5698211624441133,
"repo_name": "rhwhite/eventTracking",
"id": "59d7fd655f993dee8357fb3fda7c55dc22073357",
"size": "13420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "analysis/CalculateDomainAverages.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "175165"
},
{
"name": "Shell",
"bytes": "8536"
}
],
"symlink_target": ""
}
|
from django.contrib.gis.db import models
from django.conf import settings
from django.db.models import Max,Min
# Create your models here.
class DepthSounding(models.Model):
depth_ft = models.IntegerField(blank=True,null=True)
geometry = models.PointField(srid=settings.GEOMETRY_DB_SRID)
objects = models.GeoManager()
def __unicode__(self):
return unicode(self.depth_ft)
def soundings_in_geom(geom):
"""
Return a query set of depthsoundings that fall within the supplied geometry.
"""
return DepthSounding.objects.filter(geometry__within=geom)
def translate_to_positive_numbers(ds_min,ds_max):
"""
Take a min and max depth as either negavite elevations or positive depths and
return positive depth values for shallowest and deepest.
"""
if ds_min is not None and ds_max is not None:
shallowest_positive = min([abs(ds_min),abs(ds_max)])
deepest_positive = max([abs(ds_min),abs(ds_max)])
else:
shallowest_positive = None
deepest_positive = None
if shallowest_positive and shallowest_positive < 10:
shallowest_positive = 0
return shallowest_positive, deepest_positive
def depth_range(geom):
"""
Return the Minimum and Maximum depth for a geometry. If the minimum is less than
10 ft., return 0 as the min.
Always return positive numbers.
"""
ds = soundings_in_geom(geom)
ds_min = ds.aggregate(Min('depth_ft'))['depth_ft__min']
ds_max = ds.aggregate(Max('depth_ft'))['depth_ft__max']
# Account for the fact that soundings may be negative or positive
return translate_to_positive_numbers(ds_min,ds_max)
def total_depth_range():
"""
Return the shallowest and deepest depth for the whole depth sounding data set.
Always return positive numbers.
"""
ds_min = DepthSounding.objects.aggregate(Min('depth_ft'))['depth_ft__min']
ds_max = DepthSounding.objects.aggregate(Max('depth_ft'))['depth_ft__max']
# Account for the fact that soundings may be negative or positive
return translate_to_positive_numbers(ds_min,ds_max)
|
{
"content_hash": "685df51f25df785e2e5858462ca8caf4",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 85,
"avg_line_length": 37.857142857142854,
"alnum_prop": 0.6886792452830188,
"repo_name": "google-code-export/marinemap",
"id": "7c423456cff38b7f55d7d8528e5109774e54ff0b",
"size": "2120",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lingcod/depth_range/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "62866"
},
{
"name": "HTML",
"bytes": "350564"
},
{
"name": "JavaScript",
"bytes": "1435695"
},
{
"name": "PLpgSQL",
"bytes": "3371"
},
{
"name": "Python",
"bytes": "1152113"
},
{
"name": "Shell",
"bytes": "12077"
}
],
"symlink_target": ""
}
|
import time
from copy import copy
from Adafruit_I2C import Adafruit_I2C
# ============================================================================
# LEDBackpack Class
# ============================================================================
class LEDBackpack:
i2c = None
# Registers
__HT16K33_REGISTER_DISPLAY_SETUP = 0x80
__HT16K33_REGISTER_SYSTEM_SETUP = 0x20
__HT16K33_REGISTER_DIMMING = 0xE0
# Data base addresses
__HT16K33_ADDRESS_KEY_DATA = 0x40
# Blink rate
__HT16K33_BLINKRATE_OFF = 0x00
__HT16K33_BLINKRATE_2HZ = 0x01
__HT16K33_BLINKRATE_1HZ = 0x02
__HT16K33_BLINKRATE_HALFHZ = 0x03
# Display buffer (8x16-bits)
__buffer = [0x0000, 0x0000, 0x0000, 0x0000, \
0x0000, 0x0000, 0x0000, 0x0000 ]
# Constructor
def __init__(self, address=0x70, debug=False):
self.i2c = Adafruit_I2C(address)
self.address = address
self.debug = debug
# Turn the oscillator on
self.i2c.write8(self.__HT16K33_REGISTER_SYSTEM_SETUP | 0x01, 0x00)
# Turn blink off
self.setBlinkRate(self.__HT16K33_BLINKRATE_OFF)
# Set maximum brightness
self.setBrightness(15)
# Clear the screen
self.clear()
def setBrightness(self, brightness):
"Sets the brightness level from 0..15"
if (brightness > 15):
brightness = 15
self.i2c.write8(self.__HT16K33_REGISTER_DIMMING | brightness, 0x00)
def setBlinkRate(self, blinkRate):
"Sets the blink rate"
if (blinkRate > self.__HT16K33_BLINKRATE_HALFHZ):
blinkRate = self.__HT16K33_BLINKRATE_OFF
self.i2c.write8(self.__HT16K33_REGISTER_DISPLAY_SETUP | 0x01 | (blinkRate << 1), 0x00)
def setBufferRow(self, row, value, update=True):
"Updates a single 16-bit entry in the 8*16-bit buffer"
if (row > 7):
return # Prevent buffer overflow
self.__buffer[row] = value # value # & 0xFFFF
if (update):
self.writeDisplay() # Update the display
def getBufferRow(self, row):
"Returns a single 16-bit entry in the 8*16-bit buffer"
if (row > 7):
return
return self.__buffer[row]
def getBuffer(self):
"Returns a copy of the raw buffer contents"
bufferCopy = copy(self.__buffer)
return bufferCopy
def writeDisplay(self):
"Updates the display memory"
bytes = []
for item in self.__buffer:
bytes.append(item & 0xFF)
bytes.append((item >> 8) & 0xFF)
self.i2c.writeList(0x00, bytes)
def getKeys(self, row):
"Returns a row of scanned key press values as a single 13-bit value (K13:K1)"
if (row > 2):
return
return self.i2c.readU16(self.__HT16K33_ADDRESS_KEY_DATA + row*2)
def clear(self, update=True):
"Clears the display memory"
self.__buffer = [ 0, 0, 0, 0, 0, 0, 0, 0 ]
if (update):
self.writeDisplay()
|
{
"content_hash": "77df63ba73746cc5596a37613d41b13f",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 90,
"avg_line_length": 29.744897959183675,
"alnum_prop": 0.5927958833619211,
"repo_name": "CSF-JH/crossbarexamples",
"id": "fd41b453c61da7c074d5fc9818ba20f2e6b8c08a",
"size": "2934",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "iotcookbook/device/pi/quadalpha/Adafruit_LEDBackpack.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Arduino",
"bytes": "34842"
},
{
"name": "Batchfile",
"bytes": "5120"
},
{
"name": "C#",
"bytes": "7363"
},
{
"name": "C++",
"bytes": "21426"
},
{
"name": "CSS",
"bytes": "596166"
},
{
"name": "Erlang",
"bytes": "7903"
},
{
"name": "HTML",
"bytes": "1618067"
},
{
"name": "Java",
"bytes": "10442"
},
{
"name": "JavaScript",
"bytes": "1584356"
},
{
"name": "Lua",
"bytes": "1233"
},
{
"name": "Makefile",
"bytes": "18676"
},
{
"name": "PHP",
"bytes": "45760"
},
{
"name": "PLSQL",
"bytes": "401670"
},
{
"name": "Python",
"bytes": "278547"
},
{
"name": "SQLPL",
"bytes": "6303"
},
{
"name": "Shell",
"bytes": "729"
}
],
"symlink_target": ""
}
|
import os
import sys
try:
# Python >=3.3
from unittest.mock import MagicMock
except ImportError:
from mock import MagicMock
class Mock(MagicMock):
# xcbq does a dir() on objects and pull stuff out of them and tries to sort
# the result. MagicMock has a bunch of stuff that can't be sorted, so let's
# like about dir().
def __dir__(self):
return []
MOCK_MODULES = [
'libqtile._ffi_pango',
'libqtile._ffi_xcursors',
'cairocffi',
'cffi',
'dateutil',
'dateutil.parser',
'dbus',
'dbus.mainloop.glib',
'iwlib',
'keyring',
'mpd',
'trollius',
'xcffib',
'xcffib.randr',
'xcffib.xfixes',
'xcffib.xinerama',
'xcffib.xproto',
'xdg.IconTheme',
]
sys.modules.update((mod_name, Mock()) for mod_name in MOCK_MODULES)
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath('../'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.graphviz',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinxcontrib.seqdiag',
'sphinx_qtile',
'numpydoc',
]
numpydoc_show_class_members = False
# Add any paths that contain templates here, relative to this directory.
templates_path = []
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Qtile'
copyright = u'2008-2016, Aldo Cortesi and contributers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.10.7'
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'man']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output --------fautod-------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {'index': 'index.html'}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Qtiledoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Qtile.tex', u'Qtile Documentation',
u'Aldo Cortesi', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('man/qtile', 'qtile', u'Qtile Documentation',
[u'Tycho Andersen'], 1),
('man/qshell', 'qshell', u'Qtile Documentation',
[u'Tycho Andersen'], 1),
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Qtile', u'Qtile Documentation',
u'Aldo Cortesi', 'Qtile', 'A hackable tiling window manager.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# only import and set the theme if we're building docs locally
if not os.environ.get('READTHEDOCS'):
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
graphviz_dot_args = ['-Lg']
# A workaround for the responsive tables always having annoying scrollbars.
def setup(app):
app.add_stylesheet("no_scrollbars.css")
|
{
"content_hash": "f9cb01cb4fe90c6a67e513d4c3510994",
"timestamp": "",
"source": "github",
"line_count": 296,
"max_line_length": 86,
"avg_line_length": 30.550675675675677,
"alnum_prop": 0.6912529027977441,
"repo_name": "kynikos/qtile",
"id": "c6b5026bc2bea31a1e4bc1fb3658889f8d638b20",
"size": "9459",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1135"
},
{
"name": "Python",
"bytes": "1141487"
},
{
"name": "Roff",
"bytes": "3605"
},
{
"name": "Shell",
"bytes": "5603"
}
],
"symlink_target": ""
}
|
import os
from flask import Flask
from webassets.loaders import PythonLoader as PythonAssetsLoader
from memex import assets
from memex.models import db
from memex.extensions import (
cache,
assets_env,
debug_toolbar,
login_manager
)
class ReverseProxied(object):
'''Wrap the application in this middleware and configure the
front-end server to add these headers, to let you quietly bind
this to a URL other than / and to an HTTP scheme that is
different than what is used locally.
In nginx:
location /myprefix {
proxy_pass http://192.168.0.1:5001;
proxy_set_header Host $host;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Scheme $scheme;
proxy_set_header X-Script-Name /myprefix;
}
:param app: the WSGI application
'''
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
script_name = environ.get('HTTP_X_SCRIPT_NAME', '')
if script_name:
environ['SCRIPT_NAME'] = script_name
path_info = environ['PATH_INFO']
if path_info.startswith(script_name):
environ['PATH_INFO'] = path_info[len(script_name):]
scheme = environ.get('HTTP_X_SCHEME', '')
if scheme:
environ['wsgi.url_scheme'] = scheme
return self.app(environ, start_response)
def create_app(object_name, env="prod"):
"""
An flask application factory, as explained here:
http://flask.pocoo.org/docs/patterns/appfactories/
Arguments:
object_name: the python path of the config object,
e.g. memex.settings.ProdConfig
env: The name of the current environment, e.g. prod or dev
"""
app = Flask(__name__)
app.wsgi_app = ReverseProxied(app.wsgi_app)
app.config.from_object(object_name)
app.config['ENV'] = env
#init the cache
cache.init_app(app)
debug_toolbar.init_app(app)
#init SQLAlchemy
db.init_app(app)
login_manager.init_app(app)
# Import and register the different asset bundles
assets_env.init_app(app)
assets_loader = PythonAssetsLoader(assets)
for name, bundle in assets_loader.load_bundles().iteritems():
assets_env.register(name, bundle)
# register our blueprints
from controllers.main import main
app.register_blueprint(main)
return app
if __name__ == '__main__':
# Import the config for the proper environment using the
# shell var MEMEX_ENV
env = os.environ.get('MEMEX_ENV', 'prod')
app = create_app('memex.settings.%sConfig' % env.capitalize(), env=env)
app.run()
|
{
"content_hash": "cba28de427dcf5821f01568c69c8f01e",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 75,
"avg_line_length": 27.5,
"alnum_prop": 0.6415584415584416,
"repo_name": "pymonger/facetview-memex",
"id": "63f0583b6707c05eb839a78586d3062bb5e37fe8",
"size": "2716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "memex/__init__.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1219"
},
{
"name": "CSS",
"bytes": "133848"
},
{
"name": "CoffeeScript",
"bytes": "18114"
},
{
"name": "HTML",
"bytes": "599193"
},
{
"name": "JavaScript",
"bytes": "1480997"
},
{
"name": "Makefile",
"bytes": "650"
},
{
"name": "PHP",
"bytes": "9044"
},
{
"name": "Python",
"bytes": "13109"
},
{
"name": "Shell",
"bytes": "126"
}
],
"symlink_target": ""
}
|
import fileinput
from termcolor import cprint
counter = 0
for line in fileinput.input():
if counter:
counter += 1
if line.startswith('='):
counter = 1
if counter > 5:
if line.startswith('\t'):
cprint(line.strip(), 'yellow')
else:
cprint(line.strip(), 'blue')
else:
print(line.strip())
|
{
"content_hash": "69ef17f131adf59fbdb9f1cd008ffe76",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 42,
"avg_line_length": 21.58823529411765,
"alnum_prop": 0.547683923705722,
"repo_name": "tiborsimon/dotfiles",
"id": "fcd207fc242bc8e24093a82a45627177bf88ae57",
"size": "390",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/networks/tcpdump-colorflow.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Emacs Lisp",
"bytes": "39746"
},
{
"name": "Makefile",
"bytes": "2330"
},
{
"name": "Python",
"bytes": "94923"
},
{
"name": "Shell",
"bytes": "155709"
},
{
"name": "Vim script",
"bytes": "38713"
}
],
"symlink_target": ""
}
|
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
from h2o.transforms.preprocessing import H2OScaler
def test_scaler():
iris = h2o.import_file(pyunit_utils.locate("smalldata/iris/iris.csv"))
scaler = H2OScaler()
scaler.fit(iris)
iris_transformed = scaler.transform(iris)
assert [[u'Iris-setosa', u'Iris-versicolor', u'Iris-virginica']] == iris_transformed["C5"].levels()
assert max(iris_transformed[["C1", "C2", "C3", "C4"]].mean().as_data_frame().transpose()[0].tolist()) < 1e-10
if __name__ == "__main__":
pyunit_utils.standalone_test(test_scaler)
else:
test_scaler()
|
{
"content_hash": "b6f08408e4d2541ac4796e505813e0d7",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 113,
"avg_line_length": 27.82608695652174,
"alnum_prop": 0.6671875,
"repo_name": "h2oai/h2o-3",
"id": "3f62a27960639c2e920d5e9d5deade72d596973e",
"size": "640",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_munging/pyunit_h2oscaler.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "12803"
},
{
"name": "CSS",
"bytes": "882321"
},
{
"name": "CoffeeScript",
"bytes": "7550"
},
{
"name": "DIGITAL Command Language",
"bytes": "106"
},
{
"name": "Dockerfile",
"bytes": "10459"
},
{
"name": "Emacs Lisp",
"bytes": "2226"
},
{
"name": "Groovy",
"bytes": "205646"
},
{
"name": "HCL",
"bytes": "36232"
},
{
"name": "HTML",
"bytes": "8018117"
},
{
"name": "HiveQL",
"bytes": "3985"
},
{
"name": "Java",
"bytes": "15981357"
},
{
"name": "JavaScript",
"bytes": "148426"
},
{
"name": "Jupyter Notebook",
"bytes": "20638329"
},
{
"name": "Makefile",
"bytes": "46043"
},
{
"name": "PHP",
"bytes": "800"
},
{
"name": "Python",
"bytes": "8188608"
},
{
"name": "R",
"bytes": "4149977"
},
{
"name": "Ruby",
"bytes": "64"
},
{
"name": "Sass",
"bytes": "23790"
},
{
"name": "Scala",
"bytes": "4845"
},
{
"name": "Shell",
"bytes": "214495"
},
{
"name": "Smarty",
"bytes": "1792"
},
{
"name": "TeX",
"bytes": "554940"
}
],
"symlink_target": ""
}
|
from web import app
from flask import render_template
from flask_login import current_user
@app.route('/login')
def login():
return 'hello'
@app.route('/me')
def profile():
user = current_user
return render_template('user/profile.html', user=user)
|
{
"content_hash": "95071cf2be36f9ba582160358619030e",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 58,
"avg_line_length": 17.666666666666668,
"alnum_prop": 0.7018867924528301,
"repo_name": "alexraileanu/todo",
"id": "f8535a3b4b3517f132f0ec0dc9438e5d6263c406",
"size": "265",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "web/routes/user.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2497"
},
{
"name": "HTML",
"bytes": "8410"
},
{
"name": "JavaScript",
"bytes": "2568"
},
{
"name": "Python",
"bytes": "10252"
}
],
"symlink_target": ""
}
|
"""Script to pre-process classification data into tfrecords."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import os
from absl import app
from absl import flags
from absl import logging
import numpy as np
import tensorflow as tf
import sentencepiece as spm
from official.nlp.xlnet import classifier_utils
from official.nlp.xlnet import preprocess_utils
flags.DEFINE_bool(
"overwrite_data",
default=False,
help="If False, will use cached data if available.")
flags.DEFINE_string("output_dir", default="", help="Output dir for TF records.")
flags.DEFINE_string(
"spiece_model_file", default="", help="Sentence Piece model path.")
flags.DEFINE_string("data_dir", default="", help="Directory for input data.")
# task specific
flags.DEFINE_string("eval_split", default="dev", help="could be dev or test")
flags.DEFINE_string("task_name", default=None, help="Task name")
flags.DEFINE_integer(
"eval_batch_size", default=64, help="batch size for evaluation")
flags.DEFINE_integer("max_seq_length", default=128, help="Max sequence length")
flags.DEFINE_integer(
"num_passes",
default=1,
help="Num passes for processing training data. "
"This is use to batch data without loss for TPUs.")
flags.DEFINE_bool("uncased", default=False, help="Use uncased.")
flags.DEFINE_bool(
"is_regression", default=False, help="Whether it's a regression task.")
flags.DEFINE_bool(
"use_bert_format",
default=False,
help="Whether to use BERT format to arrange input data.")
FLAGS = flags.FLAGS
class InputExample(object):
"""A single training/test example for simple sequence classification."""
def __init__(self, guid, text_a, text_b=None, label=None):
"""Constructs a InputExample.
Args:
guid: Unique id for the example.
text_a: string. The untokenized text of the first sequence. For single
sequence tasks, only this sequence must be specified.
text_b: (Optional) string. The untokenized text of the second sequence.
Only must be specified for sequence pair tasks.
label: (Optional) string. The label of the example. This should be
specified for train and dev examples, but not for test examples.
"""
self.guid = guid
self.text_a = text_a
self.text_b = text_b
self.label = label
class DataProcessor(object):
"""Base class for data converters for sequence classification data sets."""
def get_train_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the train set."""
raise NotImplementedError()
def get_dev_examples(self, data_dir):
"""Gets a collection of `InputExample`s for the dev set."""
raise NotImplementedError()
def get_test_examples(self, data_dir):
"""Gets a collection of `InputExample`s for prediction."""
raise NotImplementedError()
def get_labels(self):
"""Gets the list of labels for this data set."""
raise NotImplementedError()
@classmethod
def _read_tsv(cls, input_file, quotechar=None):
"""Reads a tab separated value file."""
with tf.io.gfile.GFile(input_file, "r") as f:
reader = csv.reader(f, delimiter="\t", quotechar=quotechar)
lines = []
for line in reader:
# pylint: disable=g-explicit-length-test
if len(line) == 0:
continue
lines.append(line)
return lines
class GLUEProcessor(DataProcessor):
"""GLUEProcessor."""
def __init__(self):
self.train_file = "train.tsv"
self.dev_file = "dev.tsv"
self.test_file = "test.tsv"
self.label_column = None
self.text_a_column = None
self.text_b_column = None
self.contains_header = True
self.test_text_a_column = None
self.test_text_b_column = None
self.test_contains_header = True
def get_train_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, self.train_file)), "train")
def get_dev_examples(self, data_dir):
"""See base class."""
return self._create_examples(
self._read_tsv(os.path.join(data_dir, self.dev_file)), "dev")
def get_test_examples(self, data_dir):
"""See base class."""
if self.test_text_a_column is None:
self.test_text_a_column = self.text_a_column
if self.test_text_b_column is None:
self.test_text_b_column = self.text_b_column
return self._create_examples(
self._read_tsv(os.path.join(data_dir, self.test_file)), "test")
def get_labels(self):
"""See base class."""
return ["0", "1"]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0 and self.contains_header and set_type != "test":
continue
if i == 0 and self.test_contains_header and set_type == "test":
continue
guid = "%s-%s" % (set_type, i)
a_column = (
self.text_a_column if set_type != "test" else self.test_text_a_column)
b_column = (
self.text_b_column if set_type != "test" else self.test_text_b_column)
# there are some incomplete lines in QNLI
if len(line) <= a_column:
logging.warning("Incomplete line, ignored.")
continue
text_a = line[a_column]
if b_column is not None:
if len(line) <= b_column:
logging.warning("Incomplete line, ignored.")
continue
text_b = line[b_column]
else:
text_b = None
if set_type == "test":
label = self.get_labels()[0]
else:
if len(line) <= self.label_column:
logging.warning("Incomplete line, ignored.")
continue
label = line[self.label_column]
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
class Yelp5Processor(DataProcessor):
"""Yelp5Processor."""
def get_train_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "train.csv"))
def get_dev_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "test.csv"))
def get_labels(self):
"""See base class."""
return ["1", "2", "3", "4", "5"]
def _create_examples(self, input_file):
"""Creates examples for the training and dev sets."""
examples = []
with tf.io.gfile.GFile(input_file) as f:
reader = csv.reader(f)
for i, line in enumerate(reader):
label = line[0]
text_a = line[1].replace('""', '"').replace('\\"', '"')
examples.append(
InputExample(guid=str(i), text_a=text_a, text_b=None, label=label))
return examples
class ImdbProcessor(DataProcessor):
"""ImdbProcessor."""
def get_labels(self):
return ["neg", "pos"]
def get_train_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "train"))
def get_dev_examples(self, data_dir):
return self._create_examples(os.path.join(data_dir, "test"))
def _create_examples(self, data_dir):
"""Creates examples."""
examples = []
for label in ["neg", "pos"]:
cur_dir = os.path.join(data_dir, label)
for filename in tf.io.gfile.listdir(cur_dir):
if not filename.endswith("txt"):
continue
if len(examples) % 1000 == 0:
logging.info("Loading dev example %d", len(examples))
path = os.path.join(cur_dir, filename)
with tf.io.gfile.GFile(path) as f:
text = f.read().strip().replace("<br />", " ")
examples.append(
InputExample(
guid="unused_id", text_a=text, text_b=None, label=label))
return examples
class MnliMatchedProcessor(GLUEProcessor):
"""MnliMatchedProcessor."""
def __init__(self):
super(MnliMatchedProcessor, self).__init__()
self.dev_file = "dev_matched.tsv"
self.test_file = "test_matched.tsv"
self.label_column = -1
self.text_a_column = 8
self.text_b_column = 9
def get_labels(self):
return ["contradiction", "entailment", "neutral"]
class MnliMismatchedProcessor(MnliMatchedProcessor):
def __init__(self):
super(MnliMismatchedProcessor, self).__init__()
self.dev_file = "dev_mismatched.tsv"
self.test_file = "test_mismatched.tsv"
class StsbProcessor(GLUEProcessor):
"""StsbProcessor."""
def __init__(self):
super(StsbProcessor, self).__init__()
self.label_column = 9
self.text_a_column = 7
self.text_b_column = 8
def get_labels(self):
return [0.0]
def _create_examples(self, lines, set_type):
"""Creates examples for the training and dev sets."""
examples = []
for (i, line) in enumerate(lines):
if i == 0 and self.contains_header and set_type != "test":
continue
if i == 0 and self.test_contains_header and set_type == "test":
continue
guid = "%s-%s" % (set_type, i)
a_column = (
self.text_a_column if set_type != "test" else self.test_text_a_column)
b_column = (
self.text_b_column if set_type != "test" else self.test_text_b_column)
# there are some incomplete lines in QNLI
if len(line) <= a_column:
logging.warning("Incomplete line, ignored.")
continue
text_a = line[a_column]
if b_column is not None:
if len(line) <= b_column:
logging.warning("Incomplete line, ignored.")
continue
text_b = line[b_column]
else:
text_b = None
if set_type == "test":
label = self.get_labels()[0]
else:
if len(line) <= self.label_column:
logging.warning("Incomplete line, ignored.")
continue
label = float(line[self.label_column])
examples.append(
InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
return examples
def file_based_convert_examples_to_features(examples,
label_list,
max_seq_length,
tokenize_fn,
output_file,
num_passes=1):
"""Convert a set of `InputExample`s to a TFRecord file."""
# do not create duplicated records
if tf.io.gfile.exists(output_file) and not FLAGS.overwrite_data:
logging.info("Do not overwrite tfrecord %s exists.", output_file)
return
logging.info("Create new tfrecord %s.", output_file)
writer = tf.io.TFRecordWriter(output_file)
examples *= num_passes
for (ex_index, example) in enumerate(examples):
if ex_index % 10000 == 0:
logging.info("Writing example %d of %d", ex_index, len(examples))
feature = classifier_utils.convert_single_example(ex_index, example,
label_list,
max_seq_length,
tokenize_fn,
FLAGS.use_bert_format)
def create_int_feature(values):
f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
return f
def create_float_feature(values):
f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))
return f
features = collections.OrderedDict()
features["input_ids"] = create_int_feature(feature.input_ids)
features["input_mask"] = create_float_feature(feature.input_mask)
features["segment_ids"] = create_int_feature(feature.segment_ids)
if label_list is not None:
features["label_ids"] = create_int_feature([feature.label_id])
else:
features["label_ids"] = create_float_feature([float(feature.label_id)])
features["is_real_example"] = create_int_feature(
[int(feature.is_real_example)])
tf_example = tf.train.Example(features=tf.train.Features(feature=features))
writer.write(tf_example.SerializeToString())
writer.close()
def main(_):
logging.set_verbosity(logging.INFO)
processors = {
"mnli_matched": MnliMatchedProcessor,
"mnli_mismatched": MnliMismatchedProcessor,
"sts-b": StsbProcessor,
"imdb": ImdbProcessor,
"yelp5": Yelp5Processor
}
task_name = FLAGS.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]()
label_list = processor.get_labels() if not FLAGS.is_regression else None
sp = spm.SentencePieceProcessor()
sp.Load(FLAGS.spiece_model_file)
def tokenize_fn(text):
text = preprocess_utils.preprocess_text(text, lower=FLAGS.uncased)
return preprocess_utils.encode_ids(sp, text)
spm_basename = os.path.basename(FLAGS.spiece_model_file)
train_file_base = "{}.len-{}.train.tf_record".format(spm_basename,
FLAGS.max_seq_length)
train_file = os.path.join(FLAGS.output_dir, train_file_base)
logging.info("Use tfrecord file %s", train_file)
train_examples = processor.get_train_examples(FLAGS.data_dir)
np.random.shuffle(train_examples)
logging.info("Num of train samples: %d", len(train_examples))
file_based_convert_examples_to_features(train_examples, label_list,
FLAGS.max_seq_length, tokenize_fn,
train_file, FLAGS.num_passes)
if FLAGS.eval_split == "dev":
eval_examples = processor.get_dev_examples(FLAGS.data_dir)
else:
eval_examples = processor.get_test_examples(FLAGS.data_dir)
logging.info("Num of eval samples: %d", len(eval_examples))
# TPU requires a fixed batch size for all batches, therefore the number
# of examples must be a multiple of the batch size, or else examples
# will get dropped. So we pad with fake examples which are ignored
# later on. These do NOT count towards the metric (all tf.metrics
# support a per-instance weight, and these get a weight of 0.0).
#
# Modified in XL: We also adopt the same mechanism for GPUs.
while len(eval_examples) % FLAGS.eval_batch_size != 0:
eval_examples.append(classifier_utils.PaddingInputExample())
eval_file_base = "{}.len-{}.{}.eval.tf_record".format(spm_basename,
FLAGS.max_seq_length,
FLAGS.eval_split)
eval_file = os.path.join(FLAGS.output_dir, eval_file_base)
file_based_convert_examples_to_features(eval_examples, label_list,
FLAGS.max_seq_length, tokenize_fn,
eval_file)
if __name__ == "__main__":
app.run(main)
|
{
"content_hash": "e2cb5eb9d4dd4925ebb0a28735e96739",
"timestamp": "",
"source": "github",
"line_count": 443,
"max_line_length": 80,
"avg_line_length": 33.45146726862303,
"alnum_prop": 0.6224441595249343,
"repo_name": "tombstone/models",
"id": "9b34ffef7c7ed66a87b8386e1675e14c11b0791d",
"size": "15508",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "official/nlp/xlnet/preprocess_classification_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1365199"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33208"
},
{
"name": "Jupyter Notebook",
"bytes": "1858048"
},
{
"name": "Makefile",
"bytes": "4763"
},
{
"name": "Python",
"bytes": "7241242"
},
{
"name": "Shell",
"bytes": "102270"
},
{
"name": "TypeScript",
"bytes": "6515"
}
],
"symlink_target": ""
}
|
import threading
import time
import math
from flask import jsonify
import datetime
import json
from data import speed_calc
from data import gear_calc
from data import gear_int_calc
from data import torque_calc
from data import engine_speed_calc
from data import fuel_consumed_calc
from data import odometer_calc
from data import fuel_level_calc
from data import heading_calc
from data import lat_calc
from data import lon_calc
class DynamicsModel(object):
def __init__(self):
self._initialize_data()
t = threading.Thread(target=self.physics_loop, name="Thread-Physics")
t.setDaemon(True)
t.start()
print("Dynamics Model initialized")
def _initialize_data(self):
self.calculations = []
self.calculations.append(speed_calc.SpeedCalc())
self.calculations.append(gear_calc.GearCalc())
self.calculations.append(gear_int_calc.GearIntCalc())
self.calculations.append(torque_calc.TorqueCalc())
self.calculations.append(engine_speed_calc.EngineSpeedCalc())
self.calculations.append(fuel_consumed_calc.FuelConsumedCalc())
self.calculations.append(odometer_calc.OdometerCalc())
self.calculations.append(fuel_level_calc.FuelLevelCalc())
self.calculations.append(heading_calc.HeadingCalc())
self.calculations.append(lat_calc.LatCalc())
self.calculations.append(lon_calc.LonCalc())
self.snapshot = {}
for data in self.calculations:
self.snapshot[data.name] = data.get()
self.delay_100Hz = datetime.timedelta(0, 0, 10000)
self.next_iterate = datetime.datetime.now() + self.delay_100Hz
self.zero_timedelta = datetime.timedelta(0, 0, 0)
self.accelerator = 0.0
self.brake = 0.0
self.steering_wheel_angle = 0.0
self.parking_brake_status = False
self.engine_running = False
self.ignition_data = 'off'
self.gear_lever = 'drive'
self.manual_trans_status = False
self.snapshot['accelerator_pedal_position'] = self.accelerator
self.snapshot['brake'] = self.brake
self.snapshot['steering_wheel_angle'] = self.steering_wheel_angle
self.snapshot['parking_brake_status'] = self.parking_brake_status
self.snapshot['engine_running'] = self.engine_running
self.snapshot['ignition_status'] = self.ignition_data
self.snapshot['brake_pedal_status'] = self.brake_pedal_status
self.snapshot['gear_lever_position'] = self.gear_lever
self.snapshot['manual_trans'] = self.manual_trans_status
self.stopped = False
def physics_loop(self):
while True:
if not self.stopped:
time_til_calc = self.next_iterate - datetime.datetime.now()
if time_til_calc > self.zero_timedelta:
time.sleep(time_til_calc.microseconds / 1000000.0)
# Assuming less than a second.
self.next_iterate = self.next_iterate + self.delay_100Hz
new_snapshot = {}
for data in self.calculations:
data.iterate(self.snapshot)
new_snapshot[data.name] = data.get()
# Store the latest user input...
new_snapshot['accelerator_pedal_position'] = self.accelerator
new_snapshot['brake'] = self.brake
new_snapshot['steering_wheel_angle'] = self.steering_wheel_angle
new_snapshot['parking_brake_status'] = self.parking_brake_status
new_snapshot['engine_running'] = self.engine_running
new_snapshot['ignition_status'] = self.ignition_data
new_snapshot['brake_pedal_status'] = self.brake_pedal_status
new_snapshot['gear_lever_position'] = self.gear_lever
new_snapshot['manual_trans'] = self.manual_trans_status
self.snapshot = new_snapshot
# Properties ---------------------
@property
def torque(self):
return self.snapshot['torque_at_transmission']
@property
def engine_speed(self):
return self.snapshot['engine_speed']
@property
def vehicle_speed(self):
return math.fabs(self.snapshot['vehicle_speed'])
@property
def brake_pedal_status(self):
return self.brake > 0.0
@property
def fuel_consumed(self):
return self.snapshot['fuel_consumed_since_restart']
@property
def odometer(self):
return self.snapshot['odometer']
@property
def fuel_level(self):
return self.snapshot['fuel_level']
@property
def lat(self):
return self.snapshot['latitude']
@property
def lon(self):
return self.snapshot['longitude']
@property
def data(self):
return json.dumps(self.snapshot)
@property
def ignition_status(self):
return self.ignition_data
@ignition_status.setter
def ignition_status(self, value):
self.ignition_data = value
if value == 'start':
self.engine_running = True
elif value == 'off':
self.engine_running = False
elif value == 'accessory':
self.engine_running = False
@property
def gear_lever_position(self):
return self.gear_lever
@gear_lever_position.setter
def gear_lever_position(self, value):
self.gear_lever = value
@property
def transmission_gear_position(self):
return self.snapshot['transmission_gear_position']
@property
def latitude(self):
return self.snapshot['latitude']
@latitude.setter
def latitude(self, value):
for data in self.calculations:
if data.name == 'latitude':
data.data = value
@property
def longitude(self):
return self.snapshot['longitude']
@longitude.setter
def longitude(self, value):
for data in self.calculations:
if data.name == 'longitude':
data.data = value
def upshift(self):
if self.manual_trans_status:
for data in self.calculations:
if data.name == "transmission_gear_int":
data.shift_up()
def downshift(self):
if self.manual_trans_status:
for data in self.calculations:
if data.name == "transmission_gear_int":
data.shift_down()
|
{
"content_hash": "0950f06f0f782673dc079285ed118623",
"timestamp": "",
"source": "github",
"line_count": 198,
"max_line_length": 80,
"avg_line_length": 32.64141414141414,
"alnum_prop": 0.6185981742224973,
"repo_name": "worldline-spain/openxc-vehicle-simulator",
"id": "7cd491e6aeee970cd7d1dffab9cbb7d8aeb412ee",
"size": "6463",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dynamics_model.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "918"
},
{
"name": "HTML",
"bytes": "7556"
},
{
"name": "JavaScript",
"bytes": "7494"
},
{
"name": "Python",
"bytes": "74343"
}
],
"symlink_target": ""
}
|
from .celery import app as celery_app
__all__ = ['celery_app']
|
{
"content_hash": "fa4937c7d7e23e646a62404548abc765",
"timestamp": "",
"source": "github",
"line_count": 3,
"max_line_length": 37,
"avg_line_length": 21.333333333333332,
"alnum_prop": 0.65625,
"repo_name": "mongkok/defocus",
"id": "1a6c551dd5f9bd987f55763e0a142c3f71580f5f",
"size": "173",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "config/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "5215"
},
{
"name": "Makefile",
"bytes": "3210"
},
{
"name": "Python",
"bytes": "39366"
},
{
"name": "Shell",
"bytes": "1712"
}
],
"symlink_target": ""
}
|
import uuid
from django.db import models
from games.managers import GameManger
class Game(models.Model):
objects = GameManger()
players = models.ManyToManyField('players.Player')
kingdom = models.JSONField(default=list)
trash_pile = models.JSONField(default=list)
game_hash = models.UUIDField()
turn_order = models.JSONField(default=list)
def save(self, *args, **kwargs):
self.game_hash = uuid.uuid4()
return super().save(*args, **kwargs)
|
{
"content_hash": "fe33b1e3e7d510ad3f90af4d5e9f85b4",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 54,
"avg_line_length": 25.736842105263158,
"alnum_prop": 0.6912065439672802,
"repo_name": "jlward/dominion",
"id": "94a4424fed770ce7632b105b1a493ddeac6f0087",
"size": "489",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "games/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20746"
},
{
"name": "Shell",
"bytes": "148"
}
],
"symlink_target": ""
}
|
from rtmbot.core import Plugin
from chatterbot import ChatBot
from plugins.console import Command
# Sessions
SESS = {}
# Init ChatBots
BOTS = ['HAL 9000', 'Wall-E', 'Agent Smith']
TRAINER='chatterbot.trainers.ChatterBotCorpusTrainer'
BOT_DICT = {B: ChatBot(B, trainer=TRAINER) for B in BOTS}
# Train based on the english corpus
#for B in BOT_DICT.values():
# B.train("chatterbot.corpus.english")
class Reply(Plugin):
def process_message(self, data):
print(data)
channel = data['channel']
if not channel.startswith("D"):
return
user = data['user']
team = data['team']
# User ID
uid = '_'.join([user,team])
bot = SESS.get(uid, None)
cmd = Command(bot=bot, bot_dict=BOT_DICT)
question = data['text'].strip()
if bot:
print(bot.name, 'is processing question:', question)
else:
print('Processing question:', question)
bot_response = cmd.run(q=question)
if cmd.error:
self.outputs.append([channel, '<BOT> {answer}'.format(answer=cmd.error)])
elif cmd.bot:
bot = cmd.bot
SESS[uid] = bot
answ_dict = dict(bot=bot.name, answer=bot_response)
self.outputs.append([channel, '<BOT@{bot}> {answer}'.format(**answ_dict)])
elif not cmd.bot:
if uid in SESS:
del SESS[uid]
self.outputs.append([channel, '<BOT> {answer}'.format(answer=bot_response)])
|
{
"content_hash": "084a74a836fe1fb1a056897170f5d9ba",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 88,
"avg_line_length": 29.647058823529413,
"alnum_prop": 0.5853174603174603,
"repo_name": "narunask/silly_chatbot",
"id": "83b47649b4ca6c3c455517a4806dc7837b48bc43",
"size": "1552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rtmbot/app/plugins/chatbot.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1592"
},
{
"name": "JavaScript",
"bytes": "255943"
},
{
"name": "Python",
"bytes": "16578"
},
{
"name": "Shell",
"bytes": "648"
}
],
"symlink_target": ""
}
|
import requests
from configparser import ConfigParser
_VTAPI = 'https://www.virustotal.com/vtapi/v2/'
def action(alert, fields, kwargs):
'''Perform a virus total lookup of a hash.
'''
config = ConfigParser()
config.read('/etc/err/virustotal.conf')
_VT_APIKEY = config.get('VirusTotal', 'apikey')
if _VT_APIKEY is None:
return 'Your VirusTotal API key has not been configured. Skipping VirusTotal action'
parameters = {
'resource' : alert.get(fields[0]),
'apikey' : _VT_APIKEY
}
r = requests.post('%sfile/report' % (_VTAPI),
data=parameters)
report = r.json()
if report.get('response_code') == 1:
return '''
\`\`\`
VirusTotal
%s
%s/%s
\`\`\`
For more information see: %s
''' % (report['scan_date'],
report['positives'],
report['total'],
report['permalink'])
else:
return '''
\`\`\`
VirusTotal
File not found.
\`\`\`
'''
|
{
"content_hash": "4e8a153befd182e4127a7a8f22eac02d",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 86,
"avg_line_length": 19.155555555555555,
"alnum_prop": 0.6473317865429234,
"repo_name": "hosom/err-autoir",
"id": "941614b81d4b3257832a45723de097879bd575f4",
"size": "862",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "actions/virustotal.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6274"
}
],
"symlink_target": ""
}
|
def license(age):
if age >= 16:
return True
else:
return False
#Makes triangle (w/ challenge)
def makes_triangle(one, two, three):
if one > two and one > three:
return two + three > one
if two > one and two > three:
return one + three > two
if three > one and three > two:
return one + two > three
#Longer Word
def longer_word(string1, string2):
if len(string1) > len(string2):
return string1
elif len(string2) > len(string1):
return string2
else:
return "Same length!"
#Max of three numbers
#I originally had this working as was probably intended (using 'else' for three), except
# max_three(10,10,9) returned 9 as the max, and this is the closest
# I could get to fixing it.
def max_three(one, two, three):
if one > two and one > three:
var_max = one
return var_max
elif two > one and two > three:
var_max = two
return var_max
elif three > one and three > two:
var_max = three
return var_max
else:
return 'None of these numbers are greater than both of the others'
#Palindrome? (w/ challenge)
def is_palindrome(word):
return word.lower() == word[::-1].lower()
#BMI (height is in inches)
def bmi(weight, height):
bmi = (weight*720) / (height ** 2)
if bmi < 17.0:
print bmi
return 'BMI is lower than the normal range.'
elif 17 <= bmi <= 25:
print bmi
return 'BMI is within the normal range.'
else:
print bmi
return 'BMI is higher than the normal range.'
return
#Challenge
def leap_year(year):
#Check if the year is a century
if str(year)[-2:] == '00':
#If True, check if year is divisible by 400
if year % 400 == 0:
print 'The year is a leap year.'
return True
else:
print 'The year is not a leap year.'
return False
#Otherwise check the year is divisible by four
else:
if year % 4 == 0:
print 'The year is a leap year.'
return True
else:
print 'The year is not a leap year.'
return False
|
{
"content_hash": "0dbcfab27ca2df8021d61c93c2f8e3b9",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 88,
"avg_line_length": 21.822429906542055,
"alnum_prop": 0.5434689507494647,
"repo_name": "Nethermaker/school-projects",
"id": "04f05aa2bdecdfbcf9603fbd3ee08f32c79cd8a2",
"size": "2345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "intro/conditional_practice.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "286112"
}
],
"symlink_target": ""
}
|
'''
Author: Hans Erik Heggem
Email: hans.erik.heggem@gmail.com
Project: Master's Thesis - Autonomous Inspection Of Wind Blades
Repository: Master's Thesis - CV (Computer Vision)
'''
import time
from Settings.Exceptions import PtGreyError
'''
@brief Pin control class.
Control a pin on the microcontroller
@param pin (uint number)
@param out_pin (True = pin control OUT, False = pin control IN (default=True))
'''
class PinControl():
def __init__(self, pin, out_pin=True):
'''CONSTRUCTOR'''
try:
import wiringpi2 as wpi
self.__wpi = wpi
except ImportError:
raise PtGreyError('wiringpi_not_available_error_msg')
self.__wpi.wiringPiSetup()
self.SetPin(pin)
if out_pin:
self.SetPinOut()
else:
self.SetPinIn()
def CheckPinControlAVailable(self):
'''
@brief Check if pin control is available
@return True/False
'''
available = True
if self.__wpi == None:
available = False
return available
def AssertPinControlAvailable(self):
'''
@brief Assert that the pin control is available
'''
if not(self.CheckPinControlAVailable()):
raise Exception(self.wiringpi_not_available_error_msg)
def GetPin(self):
'''
@brief Get the pin in control
'''
self.AssertPinControlAvailable()
return self.__pin
def GetWiringControl(self):
'''
@brief Get wiring control class instance
@return wpi
'''
self.AssertPinControlAvailable()
return self.__wpi
def SetPin(self, pin):
'''
@brief Set which pin to control
@param pin (uint number)
'''
self.__pin = pin
def SetPinOut(self):
'''
@brief Set pin control out
Default value out is low (0)
'''
self.AssertPinControlAvailable()
self.__wpi.pinMode(self.__pin, 1)
self.__out_pin = True
self.SetPinLow()
def SetPinIn(self):
'''
@brief Set pin control in
'''
self.AssertPinControlAvailable()
self.__wpi.pinMode(self.__pin, 0)
self.__out_pin = False
def CheckOutPin(self):
'''
@brief Check if pin control is set to OUT (return True) or IN (return False)
@return True/False
'''
self.AssertPinControlAvailable()
return self.__out_pin
def AssertOutPin(self):
'''
@brief Assert that pin control is set to OUT
'''
if not(self.CheckOutPin()):
raise Exception('Set pin to OUT before writing!')
def AssertInPin(self):
'''
@brief Assert that pin control is set to OUT
'''
if self.CheckOutPin():
raise Exception('Set pin to IN before reading!')
def TogglePin(self, pause=0.0):
'''
@brief Toggle pin
@param pause (insert delay between High->low (default=0))
'''
self.AssertOutPin()
self.__wpi.digitalWrite(self.__pin, 1)
if pause > 0:
time.sleep(pause)
self.__wpi.digitalWrite(self.__pin, 0)
def SetPinHigh(self):
'''
@brief Set pin high
'''
self.AssertOutPin()
self.__wpi.digitalWrite(self.__pin, 1)
def SetPinLow(self):
'''
@brief Set pin low
'''
self.AssertOutPin()
self.__wpi.digitalWrite(self.__pin, 0)
def ReadPin(self):
'''
@brief Read high/low value of pin
@return 1/0
'''
self.AssertInPin()
return self.__wpi.digitalRead(self.__pin)
def __del__(self):
'''DESTRUCTOR'''
if self.CheckPinControlAVailable():
if self.CheckOutPin():
self.SetPinLow()
|
{
"content_hash": "6bd830ad6cc31c19d1f0a9e11954bffb",
"timestamp": "",
"source": "github",
"line_count": 156,
"max_line_length": 79,
"avg_line_length": 20.762820512820515,
"alnum_prop": 0.6637851188638468,
"repo_name": "hansehe/Wind-Blade-Inspection",
"id": "26456e23ffbf9e3aa29b4cf549e43356f7efb015",
"size": "3239",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/DroneVision/DroneVision_src/hardware/PinControl.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2183232"
}
],
"symlink_target": ""
}
|
import argparse
import numpy as np
import os
from LigParGenTools import *
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog='Converter.py',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="""
SCRIPT TO CREATE CUSTOM SOLVENT BOXES FOR
OPENMM AND NAMD FROM LIGPARGEN FILES
Created on Mon Nov 14 15:10:05 2016
@author: Leela S. Dodda leela.dodda@yale.edu
@author: William L. Jorgensen Lab
if using PDB file
Usage: python CustomSolBox.py -p OCT.pdb -b 45 -r OCT
Usage: python CustomSolBox.py -p CYH.pdb -r CYH -b 28
REQUIREMENTS:
Preferably Anaconda python with following modules
argparse
numpy
"""
)
parser.add_argument(
"-p", "--pdb", help="Submit PDB file from CHEMSPIDER or PubChem", type=str)
parser.add_argument(
"-r", "--sol_name", help="Submit PDB file from CHEMSPIDER or PubChem", type=str)
parser.add_argument("-b", "--box_size", type=float,
help="SIZE of the CUBIC box in ANGSTROM")
parser.add_argument("-ns", "--num_solv", type=int,
help="NUMBER of Molecules in CUBIC box")
args = parser.parse_args()
try:
BOX_MAKER(args.pdb, args.box_size,args.sol_name, args.num_solv)
except TypeError:
print('For Help: python CustomSolBox.py -h')
|
{
"content_hash": "b444d5d0dae6e9c2a3ac7d878fc0b006",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 88,
"avg_line_length": 33.325,
"alnum_prop": 0.6564141035258815,
"repo_name": "leelasd/LigParGenTools",
"id": "0db9c41e67c1c63f4f035baf3aa4bbb0e1b070fd",
"size": "1333",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CustomSolBox.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14398"
}
],
"symlink_target": ""
}
|
import testtools
from tempest.api.telemetry import base
from tempest import config
from tempest import test
CONF = config.CONF
class TelemetryNotificationAPITestJSON(base.BaseTelemetryTest):
_interface = 'json'
@classmethod
def resource_setup(cls):
if CONF.telemetry.too_slow_to_test:
raise cls.skipException("Ceilometer feature for fast work mysql "
"is disabled")
super(TelemetryNotificationAPITestJSON, cls).resource_setup()
@test.attr(type="gate")
@testtools.skipIf(not CONF.service_available.nova,
"Nova is not available.")
def test_check_nova_notification(self):
resp, body = self.create_server()
self.assertEqual(resp.status, 202)
query = ('resource', 'eq', body['id'])
for metric in self.nova_notifications:
self.await_samples(metric, query)
@test.attr(type="smoke")
@test.services("image")
@testtools.skipIf(not CONF.image_feature_enabled.api_v1,
"Glance api v1 is disabled")
@test.skip_because(bug='1351627')
def test_check_glance_v1_notifications(self):
_, body = self.create_image(self.image_client)
self.image_client.update_image(body['id'], data='data')
query = 'resource', 'eq', body['id']
self.image_client.delete_image(body['id'])
for metric in self.glance_notifications:
self.await_samples(metric, query)
@test.attr(type="smoke")
@test.services("image")
@testtools.skipIf(not CONF.image_feature_enabled.api_v2,
"Glance api v2 is disabled")
@test.skip_because(bug='1351627')
def test_check_glance_v2_notifications(self):
_, body = self.create_image(self.image_client_v2)
self.image_client_v2.store_image(body['id'], "file")
self.image_client_v2.get_image_file(body['id'])
query = 'resource', 'eq', body['id']
for metric in self.glance_v2_notifications:
self.await_samples(metric, query)
class TelemetryNotificationAPITestXML(TelemetryNotificationAPITestJSON):
_interface = 'xml'
|
{
"content_hash": "f74165158fae5abd954311a4235c3f67",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 77,
"avg_line_length": 32.25373134328358,
"alnum_prop": 0.6376677464136974,
"repo_name": "nikolay-fedotov/tempest",
"id": "42e2a2daa683dc4902eae8db6a9e817d6b8d8dea",
"size": "2734",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tempest/api/telemetry/test_telemetry_notification_api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
}
|
import os
import wx
import wx.calendar as wxcal
from fittopframe import FitTopFrame
class FitMainWindow(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title, wx.DefaultPosition, (250, 150))
self.start_date = ""
self.end_date = ""
self.setEpochEternal()
self.SetMinSize((750, 300))
self.Center()
self.panel = wx.Panel(self, -1)
# top level sizer.
self.main_grid_bag_sizer = wx.GridBagSizer(9,9)
self.dir_name = wx.TextCtrl(self.panel, wx.ID_ANY, size=(180, 20))
self.dir_name.SetValue("Enter")
self.log_path = wx.Button(self.panel, wx.ID_ANY, "Logs Path")
self.log_path.Bind(wx.EVT_BUTTON, self.openDir)
# Check grid_bag to select Top/Free/IOStat
self.top_cb = wx.CheckBox(self.panel, wx.ID_ANY, 'Top')
self.top_cb.SetValue(False)
self.free_cb = wx.CheckBox(self.panel, wx.ID_ANY, 'Free')
self.free_cb.SetValue(False)
self.launch_button = wx.Button(self.panel, wx.ID_ANY, "Launch")
self.launch_button.Bind(wx.EVT_BUTTON,self.LaunchTopFrame)
self.start_static_box = wx.StaticBox(self.panel, wx.ID_ANY, "Start Date")
self.start_static_box_sizer = wx.StaticBoxSizer(self.start_static_box,wx.VERTICAL)
self.end_static_box = wx.StaticBox(self.panel, wx.ID_ANY, "End Date")
self.end_static_box_sizer = wx.StaticBoxSizer(self.end_static_box,wx.VERTICAL)
self.start_cal = wxcal.CalendarCtrl(self.panel, wx.ID_ANY, wx.DateTime.Today(),
style=wxcal.CAL_SEQUENTIAL_MONTH_SELECTION)
self.start_cal.Bind(wxcal.EVT_CALENDAR_SEL_CHANGED,self.SetStart)
self.end_cal = wxcal.CalendarCtrl(self.panel, wx.ID_ANY, wx.DateTime.Today(),
style=wxcal.CAL_SEQUENTIAL_MONTH_SELECTION)
self.end_cal.Bind(wxcal.EVT_CALENDAR_SEL_CHANGED,self.SetEnd)
self.start_static_box_sizer.Add(self.start_cal, proportion = 1, flag = wx.ALL | wx.EXPAND, border = 10)
self.end_static_box_sizer.Add(self.end_cal, proportion = 1, flag = wx.ALL | wx.EXPAND, border = 10)
self.end_cal.Disable()
self.start_cal.Disable()
self.date_sel = wx.CheckBox(self.panel, wx.ID_ANY, 'Date Range')
self.date_sel.SetValue(False)
self.date_sel.Bind(wx.EVT_CHECKBOX, self.calenderEnDis)
# Add to the correct grid_bag.
self.main_grid_bag_sizer.Add(self.dir_name, pos=(0,0), span=(1,3), flag = wx.EXPAND)
self.main_grid_bag_sizer.Add(self.log_path, (0,3), (1,1), flag = wx.EXPAND)
self.main_grid_bag_sizer.Add(self.launch_button, (1,3), (1,1), flag = wx.EXPAND)
self.main_grid_bag_sizer.Add(self.top_cb, (1,0), (1,1),flag = wx.EXPAND)
self.main_grid_bag_sizer.Add(self.free_cb, (1,1), (1,1), flag = wx.EXPAND)
self.main_grid_bag_sizer.Add(self.start_static_box_sizer, (2,0),wx.DefaultSpan,flag = wx.EXPAND)
self.main_grid_bag_sizer.Add(self.end_static_box_sizer, (2,1),wx.DefaultSpan, flag = wx.EXPAND)
self.main_grid_bag_sizer.Add(self.date_sel, (2,3), (1,1), flag = wx.EXPAND)
self.panel.SetSizerAndFit(self.main_grid_bag_sizer)
def calenderEnDis(self, event):
cb = event.GetEventObject()
if cb.GetValue() == True:
self.end_cal.Enable()
self.start_cal.Enable()
else:
self.end_cal.Disable()
self.start_cal.Disable()
self.setEpochEternal()
def setEpochEternal(self):
dt = wx.DateTime().Set(day=1,month=1,year=1900)
self.start_date = str(dt)
dt.Set(day=1,month=1,year=2050)
self.end_date = str(dt)
def SetStart(self, event):
self.start_date = str(event.GetDate())
def SetEnd(self, event):
self.end_date = str(event.GetDate())
def openDir(self, event):
dlg = wx.DirDialog(self, "Choose a directory:", style=wx.DD_DEFAULT_STYLE | wx.DD_NEW_DIR_BUTTON)
if dlg.ShowModal() == wx.ID_OK:
self.dir_name.SetValue(dlg.GetPath())
dlg.Destroy()
def LaunchTopFrame(self,event):
if os.path.exists(self.dir_name.GetValue()) == True:
self.top_frame = FitTopFrame(None, wx.ID_ANY, self.dir_name.GetValue(), self.start_date, self.end_date, title='Top Window')
self.top_frame.Centre(True)
self.frames = dict()
self.top_frame.Show(True)
else:
self.showErrorDialog("Directory does not exist.")
def showErrorDialog(self, message, closeFrame = False):
dia = wx.MessageDialog(self, message, 'Error', style = wx.OK|wx.ICON_ERROR)
dia.ShowModal()
if closeFrame == True:
self.Destroy()
else:
dia.Destroy()
|
{
"content_hash": "13b8f4147673fd51a7ab44a99041b8e9",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 135,
"avg_line_length": 41.78632478632478,
"alnum_prop": 0.6076907343014931,
"repo_name": "karthikdevel/fit",
"id": "454ade21ba097c9511e1510e6a6cc55e1cb20119",
"size": "4889",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gui/wxpython/fitmainwindow.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "26629"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('account', '0005_auto_20161027_1111'),
]
operations = [
migrations.CreateModel(
name='Activation',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('activation_key', models.CharField(max_length=40, verbose_name='activation key')),
('created', models.DateTimeField(auto_now_add=True, verbose_name='created')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='activation', to=settings.AUTH_USER_MODEL)),
],
),
]
|
{
"content_hash": "cd4a49f93c2e7e8b62351dec3751062e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 148,
"avg_line_length": 35.791666666666664,
"alnum_prop": 0.6309662398137369,
"repo_name": "katyaeka2710/python2017005",
"id": "2d603aa8c9f40bd92fa758ffc91608a50c1b5ad0",
"size": "932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "account/migrations/0006_activation.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "70865"
},
{
"name": "HTML",
"bytes": "99904"
},
{
"name": "JavaScript",
"bytes": "74694"
},
{
"name": "Python",
"bytes": "139942"
}
],
"symlink_target": ""
}
|
from mako.template import Template
import uuid
from os.path import join, dirname
from collections import namedtuple
import requests
from owslib.wps import WebProcessingService
from pyramid.settings import asbool
from pyramid.events import NewRequest
from phoenix.db import mongodb
import logging
LOGGER = logging.getLogger("PHOENIX")
def includeme(config):
# catalog service
def add_catalog(event):
settings = event.request.registry.settings
if settings.get('catalog') is None:
try:
settings['catalog'] = catalog_factory(config.registry)
except Exception:
LOGGER.warning('Could not connect catalog service.')
event.request.catalog = settings.get('catalog')
config.add_subscriber(add_catalog, NewRequest)
def catalog_factory(registry):
db = mongodb(registry)
return MongodbCatalog(db.catalog)
def _fetch_wps_metadata(url, title=None):
"""Fetch capabilities metadata from wps service and return record dict."""
wps = WebProcessingService(url, verify=False, skip_caps=False)
record = dict(
title=title or wps.identification.title or "Unknown",
abstract=getattr(wps.identification, 'abstract', ''),
url=wps.url,
creator=wps.provider.name,
keywords=getattr(wps.identification, 'keywords', []),
rights=getattr(wps.identification, 'accessconstraints', ''),
)
return record
class Catalog(object):
def get_record_by_id(self, identifier):
raise NotImplementedError
def delete_record(self, identifier):
raise NotImplementedError
def insert_record(self, record):
raise NotImplementedError
def harvest(self, url, service_title=None, public=False):
raise NotImplementedError
def get_services(self, maxrecords=100):
raise NotImplementedError
def clear_services(self):
raise NotImplementedError
def doc2record(document):
"""Converts ``document`` from mongodb to a ``Record`` object."""
record = None
if isinstance(document, dict):
if '_id' in document:
# _id field not allowed in record
del document["_id"]
record = namedtuple('Record', list(document.keys()))(*list(document.values()))
return record
class MongodbCatalog(Catalog):
"""Implementation of a Catalog with MongoDB."""
def __init__(self, collection):
self.collection = collection
def get_record_by_id(self, identifier):
return doc2record(self.collection.find_one({'identifier': identifier}))
def delete_record(self, identifier):
self.collection.delete_one({'identifier': identifier})
def insert_record(self, record):
record['identifier'] = uuid.uuid4().hex
self.collection.save(record)
def harvest(self, url, service_title=None, public=False):
try:
# fetch metadata
record = _fetch_wps_metadata(url, title=service_title)
record['public'] = public
self.insert_record(record)
except Exception:
LOGGER.warning("could not harvest metadata")
raise Exception("could not harvest metadata")
def get_services(self, maxrecords=100):
return [doc2record(doc) for doc in self.collection.find()]
def clear_services(self):
self.collection.drop()
|
{
"content_hash": "8f066a7aa1e9536fe530166393de406e",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 86,
"avg_line_length": 29.884955752212388,
"alnum_prop": 0.6665679597275689,
"repo_name": "bird-house/pyramid-phoenix",
"id": "9aed8a656e34c1e5e09f0dbe565b0050cf8f07b4",
"size": "3377",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phoenix/catalog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "32044"
},
{
"name": "HTML",
"bytes": "32624"
},
{
"name": "JavaScript",
"bytes": "980055"
},
{
"name": "Makefile",
"bytes": "4102"
},
{
"name": "Mako",
"bytes": "4118"
},
{
"name": "Python",
"bytes": "153896"
},
{
"name": "Shell",
"bytes": "1121"
}
],
"symlink_target": ""
}
|
import flask
from designate.openstack.common import log as logging
from designate.central import rpcapi as central_rpcapi
LOG = logging.getLogger(__name__)
central_api = central_rpcapi.CentralAPI()
blueprint = flask.Blueprint('quotas', __name__)
@blueprint.route('/quotas/<tenant_id>', methods=['GET'])
def get_quotas(tenant_id):
context = flask.request.environ.get('context')
quotas = central_api.get_quotas(context, tenant_id)
return flask.jsonify(quotas)
@blueprint.route('/quotas/<tenant_id>', methods=['PUT', 'POST'])
def set_quota(tenant_id):
context = flask.request.environ.get('context')
values = flask.request.json
for resource, hard_limit in values.items():
central_api.set_quota(context, tenant_id, resource, hard_limit)
quotas = central_api.get_quotas(context, tenant_id)
return flask.jsonify(quotas)
@blueprint.route('/quotas/<tenant_id>', methods=['DELETE'])
def reset_quotas(tenant_id):
context = flask.request.environ.get('context')
central_api.reset_quotas(context, tenant_id)
return flask.Response(status=200)
|
{
"content_hash": "0b1e9578a910252ca8c0f7f092688189",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 71,
"avg_line_length": 29.54054054054054,
"alnum_prop": 0.7136322049405307,
"repo_name": "richm/designate",
"id": "4860a4ca2998655bee656a95a420d27e260a94d3",
"size": "1742",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "designate/api/v1/extensions/quotas.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1272656"
},
{
"name": "Shell",
"bytes": "3809"
}
],
"symlink_target": ""
}
|
import gevent
import gevent.monkey
gevent.monkey.patch_all()
import os
import sys
import uuid
import logging
import coverage
import testtools
from testtools.matchers import Equals, MismatchError, Not, Contains
from testtools import content, content_type, ExpectedException
import keystoneclient.v2_0.client as keystone
from keystonemiddleware import auth_token
from vnc_api.vnc_api import *
import cfgm_common
from cfgm_common import vnc_cgitb
vnc_cgitb.enable(format='text')
sys.path.append('../common/tests')
import test_utils
import test_common
import test_case
from test_perms2 import (User, set_perms, vnc_read_obj, vnc_aal_create,
vnc_aal_add_rule, token_from_user_info)
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
# This is needed for VncApi._authenticate invocation from within Api server.
# We don't have access to user information so we hard code admin credentials.
def ks_admin_authenticate(self, response=None, headers=None):
rval = token_from_user_info('admin', 'admin', 'default-domain', 'cloud-admin')
new_headers = {}
new_headers['X-AUTH-TOKEN'] = rval
return new_headers
class TestUserVisible(test_case.ApiServerTestCase):
domain_name = 'default-domain'
fqdn = [domain_name]
@classmethod
def setUpClass(cls):
cls.console_handler = logging.StreamHandler()
cls.console_handler.setLevel(logging.DEBUG)
logger.addHandler(cls.console_handler)
extra_mocks = [(keystone.Client,
'__new__', test_utils.FakeKeystoneClient),
(vnc_api.vnc_api.VncApi,
'_authenticate', ks_admin_authenticate),
(auth_token, 'AuthProtocol',
test_utils.FakeAuthProtocol)]
extra_config_knobs = [
('DEFAULTS', 'aaa_mode', 'rbac'),
('DEFAULTS', 'cloud_admin_role', 'cloud-admin'),
('DEFAULTS', 'global_read_only_role', 'read-only-role'),
('DEFAULTS', 'auth', 'keystone'),
]
super(TestUserVisible, cls).setUpClass(extra_mocks=extra_mocks,
extra_config_knobs=extra_config_knobs)
@classmethod
def tearDownClass(cls, *args, **kwargs):
logger.removeHandler(cls.console_handler)
super(TestUserVisible, cls).tearDownClass(*args, **kwargs)
def setUp(self):
super(TestUserVisible, self).setUp()
ip = self._api_server_ip
port = self._api_server_port
kc = keystone.Client(username='admin', password='contrail123',
tenant_name='admin',
auth_url='http://127.0.0.1:5000/v2.0')
# prepare token before vnc api invokes keystone
self.test = User(ip, port, kc, 'test', 'test123', 'test-role', 'admin-%s' % self.id())
self.admin = User(ip, port, kc, 'admin', 'contrail123', 'cloud-admin', 'admin-%s' % self.id())
def test_user_visible_perms(self):
user = self.test
project_obj = Project(user.project)
project_obj.uuid = user.project_uuid
self.admin.vnc_lib.project_create(project_obj)
# read projects back
user.project_obj = vnc_read_obj(self.admin.vnc_lib,
'project', obj_uuid = user.project_uuid)
user.domain_id = user.project_obj.parent_uuid
user.vnc_lib.set_domain_id(user.project_obj.parent_uuid)
logger.info( 'Change owner of project %s to %s' % (user.project, user.project_uuid))
set_perms(user.project_obj, owner=user.project_uuid, share = [])
self.admin.vnc_lib.project_update(user.project_obj)
# allow permission to create all objects
user.proj_rg = vnc_aal_create(self.admin.vnc_lib, user.project_obj)
vnc_aal_add_rule(self.admin.vnc_lib, user.proj_rg,
rule_str = '* %s:CRUD' % user.role)
ipam_obj = NetworkIpam('ipam-%s' %(self.id()), user.project_obj)
user.vnc_lib.network_ipam_create(ipam_obj)
ipam_sn_v4 = IpamSubnetType(subnet=SubnetType('11.1.1.0', 24))
kwargs = {'id_perms':{'user_visible': False}}
vn = VirtualNetwork('vn-%s' %(self.id()), user.project_obj, **kwargs)
vn.add_network_ipam(ipam_obj, VnSubnetsType([ipam_sn_v4]))
#create virtual-network by non-admin user should fail when user_visible -> 'false'
with ExpectedException(BadRequest) as e:
user.vnc_lib.virtual_network_create(vn)
#create virtual-network by admin user
self.admin.vnc_lib.virtual_network_create(vn)
vn_fq_name = vn.get_fq_name()
#delete virtual-network by non-admin user should fail when user_visible -> 'false'
with ExpectedException(NoIdError) as e:
user.vnc_lib.virtual_network_delete(fq_name = vn_fq_name)
#update virtual-network by non-admin user should fail when user_visible -> 'false'
vn.display_name = "test_perms"
with ExpectedException(NoIdError) as e:
user.vnc_lib.virtual_network_update(vn)
#end test_user_visible_perms
# class TestUserVisible
|
{
"content_hash": "59860eb27b05f92809cb2035c03556f3",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 102,
"avg_line_length": 40.023622047244096,
"alnum_prop": 0.6435176077119811,
"repo_name": "eonpatapon/contrail-controller",
"id": "7e66a8050a4d24fd5791382d8c9d973bedbffa87",
"size": "5153",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/config/api-server/vnc_cfg_api_server/tests/test_perms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "722794"
},
{
"name": "C++",
"bytes": "22097574"
},
{
"name": "GDB",
"bytes": "39260"
},
{
"name": "Go",
"bytes": "47213"
},
{
"name": "Java",
"bytes": "91653"
},
{
"name": "Lua",
"bytes": "13345"
},
{
"name": "PowerShell",
"bytes": "1810"
},
{
"name": "Python",
"bytes": "7240671"
},
{
"name": "Roff",
"bytes": "41295"
},
{
"name": "Ruby",
"bytes": "13596"
},
{
"name": "Shell",
"bytes": "53994"
}
],
"symlink_target": ""
}
|
import os
VERSION = (0, 1, 1, 'alpha', 0)
def get_version():
version = '%d.%d.%d' % (VERSION[0], VERSION[1], VERSION[2])
return version
def get_logger(name='paloma'):
# - Python logging api
import logging
return logging.getLogger(name)
def report(msg='',exception=None,level='error',name='paloma'):
''' error reporting
'''
import traceback
if exception:
msg = str(exception) + "\n" + str(traceback.format_exc() )
getattr( get_logger(name) ,level,get_logger().error)( msg )
def run(project_dir):
import sys,os
sys.path.append(os.path.dirname( project_dir ) )
sys.path.append( project_dir )
#: argv[1] is manage.py command
sys.argv[1] = 'bouncer'
from django.core.management import execute_manager
import imp
try:
imp.find_module('settings') # Assumed to be in the same directory.
except ImportError:
import sys
sys.stderr.write(str(sys.path))
sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
sys.exit(1)
import settings
execute_manager(settings)
|
{
"content_hash": "01198a14c6cb7c81e93e98b8931e2c22",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 220,
"avg_line_length": 28.727272727272727,
"alnum_prop": 0.6384493670886076,
"repo_name": "hdknr/paloma",
"id": "9f49ab614104d3b927d9648783b3fcf60b62d7c1",
"size": "1290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/paloma/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "3054"
},
{
"name": "Python",
"bytes": "162971"
},
{
"name": "Shell",
"bytes": "1786"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('project', '0056_auto_20180614_0734'),
]
operations = [
migrations.AddField(
model_name='application',
name='published_timestamp',
field=models.DateTimeField(blank=True, null=True),
),
]
|
{
"content_hash": "3480b136445cd4917d974fc8c88e5f29",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 62,
"avg_line_length": 22.555555555555557,
"alnum_prop": 0.6108374384236454,
"repo_name": "unicef/un-partner-portal",
"id": "bf17e0a2b2b85affd3e9fd77936b84f0b54292f6",
"size": "480",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "backend/unpp_api/apps/project/migrations/0057_application_published_timestamp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "468629"
},
{
"name": "Dockerfile",
"bytes": "2303"
},
{
"name": "HTML",
"bytes": "49027"
},
{
"name": "JavaScript",
"bytes": "2199879"
},
{
"name": "Python",
"bytes": "1322681"
},
{
"name": "Shell",
"bytes": "4734"
},
{
"name": "Smarty",
"bytes": "751"
}
],
"symlink_target": ""
}
|
from modularodm import fields
from framework.mongo import (
ObjectId,
StoredObject,
utils as mongo_utils
)
from website.util import api_v2_url
@mongo_utils.unique_on(['text'])
class Subject(StoredObject):
_id = fields.StringField(primary=True, default=lambda: str(ObjectId()))
text = fields.StringField(required=True)
parents = fields.ForeignField('subject', list=True)
children = fields.ForeignField('subject', list=True)
@property
def absolute_api_v2_url(self):
return api_v2_url('taxonomies/{}/'.format(self._id))
@property
def child_count(self):
return len(self.children)
def get_absolute_url(self):
return self.absolute_api_v2_url
|
{
"content_hash": "5f2ccdde248f0d2860c5334140debf99",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 75,
"avg_line_length": 26.444444444444443,
"alnum_prop": 0.6862745098039216,
"repo_name": "emetsger/osf.io",
"id": "87ae382f06f6f69670a57c7f336845f1850cac77",
"size": "714",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "website/project/taxonomies/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "160226"
},
{
"name": "HTML",
"bytes": "121662"
},
{
"name": "JavaScript",
"bytes": "1672685"
},
{
"name": "Mako",
"bytes": "660837"
},
{
"name": "Perl",
"bytes": "13885"
},
{
"name": "Python",
"bytes": "6189751"
}
],
"symlink_target": ""
}
|
from django.conf import settings
FIRSTCLASS_EMAIL_BACKEND = getattr(settings, 'FIRSTCLASS_EMAIL_BACKEND', 'django.core.mail.backends.smtp.EmailBackend')
FIRSTCLASS_MIDDLEWARE = getattr(settings, 'FIRSTCLASS_MIDDLEWARE', (
'firstclass.middleware.online.ViewOnlineMiddleware',
'firstclass.middleware.alternative.MultiAlternativesMiddleware',
'firstclass.middleware.text.PlainTextMiddleware',
))
|
{
"content_hash": "cdd613b2ab446c99bd4eeb064e31f739",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 119,
"avg_line_length": 50.75,
"alnum_prop": 0.8054187192118226,
"repo_name": "amccloud/django-firstclass",
"id": "78003e5bacb23700c746dc4a1db3c28b9a71491a",
"size": "406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "firstclass/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7379"
}
],
"symlink_target": ""
}
|
"""
A script to create hyphenated redirects for n or m dash pages.
This script collects pages with n or m dash in their title and creates
a redirect from the corresponding hyphenated version. If the redirect
already exists, it is skipped.
Use -reversed option to create n dash redirects for hyphenated pages.
Some communities can decide to use hyphenated titles for templates, modules
or categories and in this case this option can be handy.
The following parameters are supported:
-always don't ask for confirmation when putting a page
-reversed create n dash redirects for hyphenated pages
-summary: set custom summary message for the edit
The following generators and filters are supported:
¶ms;
"""
#
# (C) Bináris, 2012
# (C) Pywikibot team, 2012-2017
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import i18n, pagegenerators
from pywikibot.bot import (MultipleSitesBot, ExistingPageBot,
NoRedirectPageBot)
from pywikibot.tools.formatter import color_format
# This is required for the text that is shown when you run this script
# with the parameter -help.
docuReplacements = {
'¶ms;': pagegenerators.parameterHelp
}
class DashRedirectBot(
MultipleSitesBot, # A bot working on multiple sites
ExistingPageBot, # CurrentPageBot which only treats existing pages
NoRedirectPageBot # CurrentPageBot which only treats non-redirects
):
"""Bot to create hyphenated or dash redirects."""
def __init__(self, generator, **kwargs):
"""
Constructor.
@param generator: the page generator that determines which pages
to work on
@type generator: generator
"""
# -always option is predefined by BaseBot class
self.availableOptions.update({
'summary': None, # custom bot summary
'reversed': False, # switch bot behavior
})
# call constructor of the super class
super(DashRedirectBot, self).__init__(site=True, **kwargs)
# assign the generator to the bot
self.generator = generator
def treat_page(self):
"""Do the magic."""
# set origin
origin = self.current_page.title()
site = self.current_page.site
# create redirect title
if not self.getOption('reversed'):
redir = pywikibot.Page(site, origin.replace('–', '-')
.replace('—', '-'))
else:
redir = pywikibot.Page(site, origin.replace('-', '–'))
# skip unchanged
if redir.title() == origin:
pywikibot.output('No need to process %s, skipping…'
% redir.title())
# suggest -reversed parameter
if '-' in origin and not self.getOption('reversed'):
pywikibot.output('Consider using -reversed parameter '
'for this particular page')
else:
# skip existing
if redir.exists():
pywikibot.output('%s already exists, skipping…'
% redir.title())
else:
# confirm and save redirect
if self.user_confirm(
color_format(
'Redirect from {lightblue}{0}{default} doesn\'t exist '
'yet.\nDo you want to create it?',
redir.title())):
# If summary option is None, it takes the default
# i18n summary from i18n subdirectory with summary key.
if self.getOption('summary'):
summary = self.getOption('summary')
else:
summary = i18n.twtranslate(site,
'ndashredir-create',
{'title': origin})
redir.set_redirect_target(self.current_page, create=True,
summary=summary)
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
options = {}
# Process global arguments to determine desired site
local_args = pywikibot.handle_args(args)
# This factory is responsible for processing command line arguments
# that are also used by other scripts and that determine on which pages
# to work on.
genFactory = pagegenerators.GeneratorFactory()
# Parse command line arguments
for arg in local_args:
# Catch the pagegenerators options
if genFactory.handleArg(arg):
continue # nothing to do here
# Now pick up custom options
arg, sep, value = arg.partition(':')
option = arg[1:]
if option == 'summary':
options[option] = value
# Take the remaining options as booleans.
# Output a hint if they aren't pre-defined in the bot class
else:
options[option] = True
# The preloading option is responsible for downloading multiple pages
# from the wiki simultaneously.
gen = genFactory.getCombinedGenerator(preload=True)
if gen:
# pass generator and private options to the bot
bot = DashRedirectBot(gen, **options)
bot.run() # guess what it does
return True
else:
pywikibot.bot.suggest_help(missing_generator=True)
return False
if __name__ == '__main__':
main()
|
{
"content_hash": "e400de3bb24e92592e437736ceb43352",
"timestamp": "",
"source": "github",
"line_count": 174,
"max_line_length": 79,
"avg_line_length": 33.10344827586207,
"alnum_prop": 0.5930555555555556,
"repo_name": "hasteur/g13bot_tools_new",
"id": "70696e47d1dea964d35b435835bf3307ea0b5aa8",
"size": "5813",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/ndashredir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "HTML",
"bytes": "1365"
},
{
"name": "Python",
"bytes": "4512430"
},
{
"name": "Shell",
"bytes": "4824"
}
],
"symlink_target": ""
}
|
"""pyNRC - Python ETC and Simulator for JWST NIRCam"""
# Import libraries
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from astropy.table import Table
from astropy.io import ascii
from webbpsf_ext.webbpsf_ext_core import NIRCam_ext
from .nrc_utils import *
from .detops import det_timing, multiaccum, nrc_header
from webbpsf_ext.webbpsf_ext_core import _check_list
from tqdm.auto import trange, tqdm
import pysiaf
from pysiaf import rotations
from . import conf
from .logging_utils import setup_logging
import logging
_log = logging.getLogger('pynrc')
__epsilon = np.finfo(float).eps
class DetectorOps(det_timing):
"""
Class to hold detector operations information. Includes SCA attributes such as
detector names and IDs as well as :class:`multiaccum` class for ramp settings.
Parameters
----------------
detector : int, str
NIRCam detector ID (481-490) or SCA ID (A1-B5).
wind_mode : str
Window mode type 'FULL', 'STRIPE', 'WINDOW'.
xpix : int
Size of window in x-pixels for frame time calculation.
ypix : int
Size of window in y-pixels for frame time calculation.
x0 : int
Lower-left x-coord position of detector window.
y0 : int
Lower-left y-coord position of detector window.
nff : int
Number of fast row resets.
Keyword Args
------------
read_mode : str
NIRCam Ramp Readout mode such as 'RAPID', 'BRIGHT1', etc.
nint : int
Number of integrations (ramps).
ngroup : int
Number of groups in a integration.
nf : int
Number of frames per group.
nd1 : int
Number of drop frame after reset (before first group read).
nd2 : int
Number of drop frames within a group (ie., groupgap).
nd3 : int
Number of drop frames after final read frame in ramp.
Examples
--------
Use kwargs functionality to pass keywords to the multiaccum class.
Send via a dictionary of keywords and values:
>>> kwargs = {'read_mode':'RAPID', 'nint':5, 'ngroup':10}
>>> d = DetectorOps(**kwargs)
Set the keywords directly:
>>> d = DetectorOps(read_mode='RAPID', nint=5, ngroup=10)
"""
def __init__(self, detector=481, wind_mode='FULL', xpix=2048, ypix=2048,
x0=0, y0=0, nff=None, **kwargs):
super().__init__(wind_mode=wind_mode, xpix=xpix, ypix=ypix,
x0=x0, y0=y0, mode='JWST', nff=nff, **kwargs)
# Typical values for SW/LW detectors that get saved based on SCA ID.
# After setting the SCA ID, these various parameters can be updated,
# however they will be reset whenever the SCA ID is modified.
# - Pixel Scales in arcsec/pix
# - Well saturation level in e-
# - Typical dark current values in e-/sec (ISIM CV3)
# - Read Noise in e-
# - IPC and PPC in %
# - p_excess: Parameters that describe the excess variance observed in
# effective noise plots.
self._properties_SW = {'pixel_scale':pixscale_SW, 'dark_current':0.002, 'read_noise':11.5,
'IPC':0.54, 'PPC':0.09, 'p_excess':(1.0,5.0), 'ktc':37.6,
'well_level':105e3, 'well_level_old':81e3}
self._properties_LW = {'pixel_scale':pixscale_LW, 'dark_current':0.034, 'read_noise':10.0,
'IPC':0.60, 'PPC':0.19, 'p_excess':(1.5,10.0), 'ktc':36.8,
'well_level':83e3, 'well_level_old':75e3}
# Automatically set the pixel scale based on detector selection
self.auto_pixscale = True
self._gain_list = {481:2.07, 482:2.01, 483:2.16, 484:2.01, 485:1.83,
486:2.00, 487:2.42, 488:1.93, 489:2.30, 490:1.85}
self._scaids = {481:'A1', 482:'A2', 483:'A3', 484:'A4', 485:'A5',
486:'B1', 487:'B2', 488:'B3', 489:'B4', 490:'B5'}
# Allow user to specify name using either SCA ID or Detector ID (ie., 481 or 'A1')
try: # First, attempt to set SCA ID
self.scaid = detector
except ValueError:
try: # If that doesn't work, then try to set Detector ID
self.detid = get_detname(detector)[3:]
except ValueError: # If neither work, raise ValueError exception
raise ValueError("Invalid detector: {0} \n\tValid names are: {1},\n\t{2}" \
.format(detector, ', '.join(self.detid_list), \
', '.join(str(e) for e in self.scaid_list)))
# For full arrays number of resets in first integration is 0
# self.wind_mode = wind_mode
_log.info('Initializing SCA {}/{}'.format(self.scaid,self.detid))
@property
def wind_mode(self):
"""Window mode attribute"""
return self._wind_mode
@wind_mode.setter
def wind_mode(self, value):
"""Set Window mode attribute"""
self._wind_mode = value
self.multiaccum.nr1 = 0 if value=='FULL' else 1
@property
def scaid(self):
"""Selected SCA ID from detectors in the `scaid_list` attribute. 481, 482, etc."""
return self._scaid
@property
def detid(self):
"""Selected Detector ID from detectors in the `detid_list` attribute. A1, A2, etc."""
return self._detid
@property
def detname(self):
"""Selected Detector ID from detectors in the `scaid_list` attribute. NRCA1, NRCA2, etc."""
return self._detname
# Used for setting the SCA ID then updating all the other detector properties
@scaid.setter
def scaid(self, value):
"""Set SCA ID (481, 482, ..., 489, 490). Automatically updates other relevant attributes."""
_check_list(value, self.scaid_list, var_name='scaid')
self._scaid = value
self._detid = self._scaids.get(self._scaid)
# Detector Name (as stored in FITS headers): NRCA1, NRCALONG, etc.
if self.channel=='LW': self._detname = 'NRC' + self.module + 'LONG'
else: self._detname = 'NRC' + self._detid
# Select various detector properties (pixel scale, dark current, read noise, etc)
# depending on LW or SW detector
dtemp = self._properties_LW if self.channel=='LW' else self._properties_SW
if self.auto_pixscale:
self.pixelscale = dtemp['pixel_scale']
self.ktc = dtemp['ktc']
self.dark_current = dtemp['dark_current']
self.read_noise = dtemp['read_noise']
self.IPC = dtemp['IPC']
self.PPC = dtemp['PPC']
self.p_excess = dtemp['p_excess']
self.well_level = dtemp['well_level']
self.gain = self._gain_list.get(self._scaid, 2.0)
# Similar to scaid.setter, except if detector ID is specified.
@detid.setter
def detid(self, value):
"""Set detector ID (A1, A2, ..., B4, B5). Automatically updates other relevant attributes."""
if 'NRC' in value:
value = value[3:]
_check_list(value, self.detid_list, var_name='detid')
# Switch dictionary keys and values, grab the corresponding SCA ID,
# and then call scaid.setter
newdict = {y:x for x,y in self._scaids.items()}
self.scaid = newdict.get(value)
@property
def scaid_list(self):
"""Allowed SCA IDs"""
return sorted(list(self._scaids.keys()))
@property
def detid_list(self):
"""Allowed Detector IDs"""
return sorted(list(self._scaids.values()))
@property
def module(self):
"""NIRCam modules A or B (inferred from detector ID)"""
return self._detid[0]
@property
def channel(self):
"""Detector channel 'SW' or 'LW' (inferred from detector ID)"""
return 'LW' if self.detid.endswith('5') else 'SW'
def xtalk(self, file_path=None):
"""Detector cross talk information"""
if file_path is None:
file = 'xtalk20150303g0.errorcut.txt'
file_path = os.path.join(conf.PYNRC_PATH, 'sim_params', file)
xt_coeffs = ascii.read(file_path, header_start=0)
ind = xt_coeffs['Det'] == self.detid
return xt_coeffs[ind]
def pixel_noise(self, fsrc=0.0, fzodi=0.0, fbg=0.0, rn=None, ktc=None, idark=None,
p_excess=None, ng=None, nf=None, verbose=False, **kwargs):
"""Noise values per pixel.
Return theoretical noise calculation for the specified MULTIACCUM exposure
in terms of e-/sec. This uses the pre-defined detector-specific noise
properties. Can specify flux of a source as well as background and
zodiacal light (in e-/sec/pix). After getting the noise per pixel per
ramp (integration), value(s) are divided by the sqrt(NINT) to return
the final noise
Parameters
----------
fsrc : float or image
Flux of source in e-/sec/pix
fzodi : float or image
Flux of the zodiacal background in e-/sec/pix
fbg : float or image
Flux of telescope background in e-/sec/pix
idark : float or image
Option to specify dark current in e-/sec/pix.
rn : float
Option to specify Read Noise per pixel (e-).
ktc : float
Option to specify kTC noise (in e-). Only valid for single frame (n=1)
p_excess : array-like
Optional. An array or list of two elements that holds the parameters
describing the excess variance observed in effective noise plots.
By default these are both 0. For NIRCam detectors, recommended
values are [1.0,5.0] for SW and [1.5,10.0] for LW.
ng : None or int or image
Option to explicitly states number of groups. This is specifically
used to enable the ability of only calculating pixel noise for
unsaturated groups for each pixel. If a numpy array, then it should
be the same shape as `fsrc` image. By default will use `self.ngroup`.
verbose : bool
Print out results at the end.
Keyword Arguments
-----------------
ideal_Poisson : bool
If set to True, use total signal for noise estimate,
otherwise MULTIACCUM equation is used.
Notes
-----
fsrc, fzodi, and fbg are functionally the same as they are immediately summed.
They can also be single values or multiple elements (list, array, tuple, etc.).
If multiple inputs are arrays, make sure their array sizes match.
"""
ma = self.multiaccum
if ng is None:
ng = ma.ngroup
if nf is None:
nf = ma.nf
if rn is None:
rn = self.read_noise
if ktc is None:
ktc = self.ktc
if p_excess is None:
p_excess = self.p_excess
if idark is None:
idark = self.dark_current
# Pixel noise per ramp (e-/sec/pix)
pn = pix_noise(ngroup=ng, nf=nf, nd2=ma.nd2, tf=self.time_frame,
rn=rn, ktc=ktc, p_excess=p_excess,
idark=idark, fsrc=fsrc, fzodi=fzodi, fbg=fbg, **kwargs)
# Divide by sqrt(Total Integrations)
final = pn / np.sqrt(ma.nint)
if verbose:
print('Noise (e-/sec/pix): {}'.format(final))
print('Total Noise (e-/pix): {}'.format(final*self.time_exp))
return final
@property
def fastaxis(self):
"""Fast readout direction in sci coords"""
# https://jwst-pipeline.readthedocs.io/en/latest/jwst/references_general/references_general.html#orientation-of-detector-image
# 481, 3, 5, 7, 9 have fastaxis equal -1
# Others have fastaxis equal +1
fastaxis = -1 if np.mod(self.scaid,2)==1 else +1
return fastaxis
@property
def slowaxis(self):
"""Slow readout direction in sci coords"""
# https://jwst-pipeline.readthedocs.io/en/latest/jwst/references_general/references_general.html#orientation-of-detector-image
# 481, 3, 5, 7, 9 have slowaxis equal +2
# Others have slowaxis equal -2
slowaxis = +2 if np.mod(self.scaid,2)==1 else -2
return slowaxis
def make_header(self, filter=None, pupil_mask=None, obs_time=None, **kwargs):
"""
Create a generic NIRCam FITS header.
Parameters
----------
filter :str
Name of filter element.
pupil_mask : str
Name of pupil element.
obs_time : datetime
Specifies when the observation was considered to be executed.
If not specified, then it will choose the current time.
This must be a datetime object:
>>> datetime.datetime(2016, 5, 9, 11, 57, 5, 796686)
"""
return nrc_header(self, filter=filter, pupil=pupil_mask, obs_time=obs_time, **kwargs)
class NIRCam(NIRCam_ext):
"""NIRCam base instrument class
Creates a NIRCam instrument class that holds all the information pertinent to
an observation using a given observation. This class extends the NIRCam subclass
``webbpsf_ext.NIRCam_ext``, to generate PSF coefficients to calculate an arbitrary
PSF based on wavelength, field position, and WFE drift.
In addition to PSF generation, includes ability to estimate detector saturation
limits, sensitivities, and perform ramp optimizations.
Parameters
==========
filter : str
Name of input filter.
pupil_mask : str, None
Pupil elements such as grisms or lyot stops (default: None).
image_mask : str, None
Specify which coronagraphic occulter (default: None).
ND_acq : bool
Add in neutral density attenuation in throughput and PSF creation?
Used primarily for sensitivity and saturation calculations.
Not recommended for simulations (TBI).
detector : int or str
NRC[A-B][1-5] or 481-490
apname : str
Pass specific SIAF aperture name, which will update pupil mask, image mask,
and detector subarray information.
autogen_coeffs : bool
Automatically generate base PSF coefficients. Equivalent to performing
``self.gen_psf_coeff()``. Default: True
WFE drift and field-dependent coefficients should be run manually via
``gen_wfedrift_coeff``, ``gen_wfefield_coeff``, and ``gen_wfemask_coeff``.
Keyword Args
============
wind_mode : str
Window mode type 'FULL', 'STRIPE', 'WINDOW'.
xpix : int
Size of window in x-pixels for frame time calculation.
ypix : int
Size of window in y-pixels for frame time calculation.
x0 : int
Lower-left x-coord position of detector window.
y0 : int
Lower-left y-coord position of detector window.
read_mode : str
NIRCam Ramp Readout mode such as 'RAPID', 'BRIGHT1', etc.
nint : int
Number of integrations (ramps).
ngroup : int
Number of groups in a integration.
nf : int
Number of frames per group.
nd1 : int
Number of drop frame after reset (before first group read).
nd2 : int
Number of drop frames within a group (ie., groupgap).
nd3 : int
Number of drop frames after final read frame in ramp.
nr1 : int
Number of reset frames within first ramp.
nr2 : int
Number of reset frames for subsequent ramps.
PSF Keywords
============
fov_pix : int
Size of the PSF FoV in pixels (real SW or LW pixels).
The defaults depend on the type of observation.
Odd number place the PSF on the center of the pixel,
whereas an even number centers it on the "crosshairs."
oversample : int
Factor to oversample during WebbPSF calculations.
Default 2 for coronagraphy and 4 otherwise.
include_si_wfe : bool
Include SI WFE measurements? Default=True.
include_distortions : bool
If True, will include a distorted version of the PSF.
pupil : str
File name or HDUList specifying telescope entrance pupil.
Can also be an OTE_Linear_Model.
pupilopd : tuple or HDUList
Tuple (file, index) or filename or HDUList specifying OPD.
Can also be an OTE_Linear_Model.
wfe_drift : float
Wavefront error drift amplitude in nm.
offset_r : float
Radial offset from the center in arcsec.
offset_theta :float
Position angle for radial offset, in degrees CCW.
bar_offset : float
For wedge masks, option to set the PSF position across the bar.
jitter : str or None
Currently either 'gaussian' or None.
jitter_sigma : float
If ``jitter = 'gaussian'``, then this is the size of the blurring effect.
npsf : int
Number of wavelengths/PSFs to fit.
ndeg : int
Degree of polynomial fit.
nproc : int
Manual setting of number of processor cores to break up PSF calculation.
If set to None, this is determined based on the requested PSF size,
number of available memory, and hardware processor cores. The automatic
calculation endeavors to leave a number of resources available to the
user so as to not crash the user's machine.
save : bool
Save the resulting PSF coefficients to a file? (default: True)
force : bool
Forces a recalculation of PSF even if saved PSF exists. (default: False)
quick : bool
Only perform a fit over the filter bandpass with a lower default polynomial degree fit.
(default: True)
use_legendre : bool
Fit with Legendre polynomials, an orthonormal basis set. (default: True)
"""
def __init__(self, filter=None, pupil_mask=None, image_mask=None,
ND_acq=False, detector=None, apname=None, autogen_coeffs=True, **kwargs):
if detector is not None:
detector = get_detname(detector)
# Available Filters
# Note: Certain narrowband filters reside in the pupil wheel and cannot be paired
# with pupil elements. This will be checked for later.
self._filters_sw = ['F070W', 'F090W', 'F115W', 'F150W', 'F150W2', 'F200W',
'F140M', 'F162M', 'F182M', 'F210M', 'F164N', 'F187N', 'F212N']
self._filters_lw = ['F277W', 'F322W2', 'F356W', 'F444W', 'F323N', 'F405N', 'F466N', 'F470N',
'F250M', 'F300M', 'F335M', 'F360M', 'F410M', 'F430M', 'F460M', 'F480M']
# Coronagraphic Masks
self._coron_masks = [None, 'MASK210R', 'MASK335R', 'MASK430R', 'MASKSWB', 'MASKLWB']
# self.offset_bar = offset_bar
# Pupil Wheel elements
self._lyot_masks = ['CIRCLYOT', 'WEDGELYOT']
# DHS in SW and Grisms in LW
self._dhs = ['DHS0', 'DHS60']
# Grism0/90 => GrismR/C
self._grism = ['GRISMR', 'GRISMC']
# Weak lens are only in SW pupil wheel (+4 in filter wheel)
self._weak_lens = ['WLP4', 'WLPM4', 'WLP8', 'WLM8', 'WLP12']
# Check alternate inputs
if pupil_mask is not None:
pupil_mask = pupil_mask.upper()
# If alternate Weak Lens values are specified
if 'WL' in pupil_mask:
wl_alt = {
'WEAK LENS +4': 'WLP4',
'WEAK LENS +8': 'WLP8',
'WEAK LENS -8': 'WLM8',
'WEAK LENS +12 (=4+8)': 'WLP12',
'WEAK LENS -4 (=4-8)': 'WLM4',
}
pupil_mask = wl_alt.get(pupil_mask, pupil_mask)
# Pair F200W throughput with WL+4
# The F212N2 throughput is then handled in read_filter() function
wl_list = ['WLP12', 'WLM4', 'WLP4']
if (pupil_mask in wl_list) and ((filter is None) or (filter!='F200W')):
filter = 'F200W'
# Check Grism alternate inputs
if 'GRISM0' in pupil_mask:
pupil_mask = 'GRISMR'
elif 'GRISM90' in pupil_mask:
pupil_mask = 'GRISMC'
super().__init__(filter=filter, pupil_mask=pupil_mask, image_mask=image_mask, **kwargs)
if apname is None:
if detector is not None:
self.detector = detector
self._ND_acq = ND_acq
self._validate_wheels()
self.update_detectors(**kwargs)
ap_name_rec = self.get_siaf_apname()
self.update_from_SIAF(ap_name_rec, pupil_mask=pupil_mask)
else:
self.update_from_SIAF(apname, pupil_mask=pupil_mask, **kwargs)
# Default to no jitter for coronagraphy
self.options['jitter'] = None if self.is_coron else 'gaussian'
# Generate PSF coefficients
if autogen_coeffs:
self.gen_psf_coeff(**kwargs)
# Background fov pix is only for coronagraphic masks
# Create a background reference class
self._fov_pix_bg = 33
self._fov_bg_match = False
# if autogen_coeffs:
self._update_bg_class(**kwargs)
# Check aperture info is consistent if not explicitly specified
ap_name_rec = self.get_siaf_apname()
if ((apname is None) and (ap_name_rec != self.aperturename) and
not (('FULL' in self.aperturename) and ('TAMASK' in self.aperturename))):
# Warning strings
out_str1 = f'Current aperture {self.aperturename} does not match recommendation ({ap_name_rec}).'
out_str2 = f'Perhaps try self.aperturename = self.get_siaf_apname()'
_log.info(out_str1)
_log.info(out_str2)
def _update_bg_class(self, fov_bg_match=None, **kwargs):
"""
If there is a coronagraphic spot or bar, then we may need to
generate another background PSF for sensitivity information.
It's easiest just to ALWAYS do a small footprint without the
coronagraphic mask and save the PSF coefficients.
WARNING: This assumes throughput of the coronagraphic substrate
for observations with a Lyot pupil mask.
Parameters
==========
fov_bg_match : bool or None
Determines whether or not to match bg FoV to sci FoV for
coronagraphic observations. If set to None, default to
`self._fov_bg_match` property. If a boolean value is
provided, then `self._fov_bg_match` is updated.
"""
try:
# Make sure we don't inadvertently delete base object
if self._nrc_bg is not self:
del self._nrc_bg
except AttributeError:
pass
# Update background PSF size if fov_bg_match is True
if fov_bg_match is not None:
self._fov_bg_match = fov_bg_match
self._fov_pix_bg = self.fov_pix if self._fov_bg_match else self._fov_pix_bg
if self._image_mask is None:
self._nrc_bg = self
else:
log_prev = conf.logging_level
setup_logging('WARN', verbose=False)
nrc_bg = NIRCam_ext(filter=self.filter, pupil_mask=self.pupil_mask,
fov_pix=self._fov_pix_bg, oversample=self._oversample)
# Generate coefficients
nrc_bg.gen_psf_coeff(**kwargs)
setup_logging(log_prev, verbose=False)
# Save as attribute
self._nrc_bg = nrc_bg
# Allowed values for filters, coronagraphic masks, and pupils
# @property
# def filter_list(self):
# """List of allowable filters."""
# return self._filters_sw + self._filters_lw
# @property
# def image_mask_list(self):
# """List of allowable coronagraphic mask values."""
# return self._coron_masks
# @property
# def pupil_mask_list(self):
# """List of allowable pupil mask values."""
# return ['CLEAR','FLAT'] + self._lyot_masks + self._grism + self._dhs + self._weak_lens
def plot_bandpass(self, ax=None, color=None, title=None, **kwargs):
"""
Plot the instrument bandpass on a selected axis.
Can pass various keywords to ``matplotlib.plot`` function.
Parameters
----------
ax : matplotlib.axes, optional
Axes on which to plot bandpass.
color :
Color of bandpass curve.
title : str
Update plot title.
Returns
-------
matplotlib.axes
Updated axes
"""
if ax is None:
f, ax = plt.subplots(**kwargs)
color='C2' if color is None else color
bp = self.bandpass
w = bp.wave / 1e4; f = bp.throughput
ax.plot(w, f, color=color, label=bp.name+' Filter', **kwargs)
ax.set_xlabel('Wavelength ($\mathdefault{\mu m}$)')
ax.set_ylabel('Throughput')
if title is None:
title = bp.name + ' - Module ' + self.module
ax.set_title(title)
return ax
# Check consistencies
def _validate_wheels(self):
"""
Validation to make sure the selected filters and pupils are allowed to be in parallel.
"""
def do_warn(wstr):
_log.warning(wstr)
_log.warning('Proceed at your own risk!')
filter = self._filter
pupil_mask = self._pupil_mask
image_mask = self._image_mask
if self.channel=='long' or self.channel=='LW':
channel = 'LW'
else:
channel = 'SW'
if image_mask is None:
image_mask = ''
if pupil_mask is None:
pupil_mask = ''
# Weak lenses can only occur in SW modules
if ('WEAK LENS' in pupil_mask) and (channel=='LW'):
wstr = '{} in pupil is not valid with filter {}.'.format(pupil_mask,filter)
wstr = wstr + '\nWeak lens only in SW module.'
do_warn(wstr)
# DHS in SW modules
if ('DHS' in pupil_mask) and (channel=='LW'):
wstr = '{} in pupil is not valid with filter {}.'.format(pupil_mask,filter)
wstr = wstr + '\nDHS only in SW module.'
do_warn(wstr)
# DHS cannot be paired with F164N or F162M
flist = ['F164N', 'F162M']
if ('DHS' in pupil_mask) and (filter in flist):
wstr = 'Both {} and filter {} exist in same pupil wheel.'.format(pupil_mask,filter)
do_warn(wstr)
# Grisms in LW modules
if ('GRISM' in pupil_mask) and (channel=='SW'):
wstr = '{} in pupil is not valid with filter {}.'.format(pupil_mask,filter)
wstr = wstr + '\nGrisms only in LW module.'
do_warn(wstr)
# Grisms cannot be paired with any Narrowband filters
flist = ['F323N', 'F405N', 'F466N', 'F470N']
if ('GRISM' in pupil_mask) and (filter in flist):
wstr = 'Both {} and filter {} exist in same pupil wheel.'.format(pupil_mask,filter)
do_warn(wstr)
# MASK430R falls in SW SCA gap and cannot be seen by SW module
if ('MASK430R' in image_mask) and (channel=='SW'):
wstr = '{} mask is no visible in SW module (filter is {})'.format(image_mask,filter)
do_warn(wstr)
# Need F200W paired with WEAK LENS +4
# The F212N2 filter is handled in the read_filter function
wl_list = ['WEAK LENS +12 (=4+8)', 'WEAK LENS -4 (=4-8)', 'WEAK LENS +4']
if (pupil_mask in wl_list) and (filter!='F200W'):
wstr = '{} is only valid with filter F200W.'.format(pupil_mask)
do_warn(wstr)
# Items in the same SW pupil wheel
sw2 = ['WEAK LENS +8', 'WEAK LENS -8', 'F162M', 'F164N', 'CIRCLYOT', 'WEDGELYOT']
if (filter in sw2) and (pupil_mask in sw2):
wstr = '{} and {} are both in the SW Pupil wheel.'.format(filter,pupil_mask)
do_warn(wstr)
# Items in the same LW pupil wheel
lw2 = ['F323N', 'F405N', 'F466N', 'F470N', 'CIRCLYOT', 'WEDGELYOT']
if (filter in lw2) and (pupil_mask in lw2):
wstr = '{} and {} are both in the LW Pupil wheel.'.format(filter,pupil_mask)
do_warn(wstr)
# ND_acq must have a LYOT stop, otherwise coronagraphic mask is not in FoV
if self.ND_acq and ('LYOT' not in pupil_mask):
wstr = 'CIRCLYOT or WEDGELYOT must be in pupil wheel if ND_acq=True.'
do_warn(wstr)
# ND_acq and coronagraphic mask are mutually exclusive
if self.ND_acq and (image_mask != ''):
wstr = 'If ND_acq is set, then mask must be None.'
do_warn(wstr)
def update_detectors(self, verbose=False, **kwargs):
""" Update detector operation parameters
Creates detector object based on :attr:`detector` attribute.
This function should be called any time a filter, pupil, mask, or
module is modified by the user.
If the user wishes to change any properties of the multiaccum ramp
or detector readout mode, pass those arguments through this function
rather than creating a whole new NIRCam() instance. For example:
>>> nrc = pynrc.NIRCam('F430M', ngroup=10, nint=5)
>>> nrc.update_detectors(ngroup=2, nint=10, wind_mode='STRIPE', ypix=64)
A dictionary of the keyword settings can be referenced in :attr:`det_info`.
This dictionary cannot be modified directly.
Parameters
----------
verbose : bool
Print out ramp and detector settings.
Keyword Args
------------
wind_mode : str
Window mode type 'FULL', 'STRIPE', 'WINDOW'.
xpix : int
Size of window in x-pixels for frame time calculation.
ypix : int
Size of window in y-pixels for frame time calculation.
x0 : int
Lower-left x-coord position of detector window.
y0 : int
Lower-left y-coord position of detector window.
read_mode : str
NIRCam Ramp Readout mode such as 'RAPID', 'BRIGHT1', etc.
nint : int
Number of integrations (ramps).
ngroup : int
Number of groups in a integration.
nf : int
Number of frames per group.
nd1 : int
Number of drop frame after reset (before first group read).
nd2 : int
Number of drop frames within a group (ie., groupgap).
nd3 : int
Number of drop frames after final read frame in ramp.
nr1 : int
Number of reset frames within first ramp.
nr2 : int
Number of reset frames for subsequent ramps.
"""
# Check if kwargs is empty
if not kwargs:
try:
kwargs = self.det_info
except AttributeError:
kwargs = {}
else:
try:
self._det_info.update(kwargs)
except AttributeError:
self._det_info = kwargs
kwargs = self.det_info
# Update detector class
# For now, it's just easier to delete old instances and start from scratch
# rather than tracking changes and updating only the changes. That could
# get complicated, and I don't think there is a memory leak from deleting
# the Detector instances.
try:
del self.Detector
except AttributeError:
pass
self.Detector = DetectorOps(detector=self.detector, **kwargs)
# Update stored kwargs
kw1 = self.Detector.to_dict()
_ = kw1.pop('detector', None)
kw2 = self.multiaccum.to_dict()
self._det_info = merge_dicts(kw1,kw2)
if verbose:
print('New Ramp Settings')
keys = ['read_mode', 'nf', 'nd2', 'ngroup', 'nint']
for k in keys:
v = self.det_info[k]
if isinstance(v,float): print("{:<9} : {:>8.0f}".format(k, v))
else: print(" {:<10} : {:>8}".format(k, v))
print('New Detector Settings')
keys = ['wind_mode', 'xpix', 'ypix', 'x0', 'y0']
for k in keys:
v = self.det_info[k]
if isinstance(v,float): print("{:<9} : {:>8.0f}".format(k, v))
else: print(" {:<10} : {:>8}".format(k, v))
print('New Ramp Times')
ma = self.multiaccum_times
keys = ['t_group', 't_frame', 't_int', 't_int_tot1', 't_int_tot2', 't_exp', 't_acq']
for k in keys:
print(' {:<10} : {:>8.3f}'.format(k, ma[k]))
def update_psf_coeff(self, filter=None, pupil_mask=None, image_mask=None, detector=None,
fov_pix=None, oversample=None, include_si_wfe=None, include_distortions=None,
pupil=None, pupilopd=None, offset_r=None, offset_theta=None, bar_offset=None,
jitter=None, jitter_sigma=None, npsf=None, ndeg=None, nproc=None, quick=None,
save=None, force=False, use_legendre=None, **kwargs):
""" Update properties and create new set of PSF coefficients
Parameters
----------
filter : str
Name of NIRCam filter.
pupil_mask : str, None
NIRCam pupil elements such as grisms or lyot stops (default: None).
image_mask : str, None
Specify which coronagraphic occulter (default: None).
detector : str
Name of detector (e.g., "NRCA5")
fov_pix : int
Size of the PSF FoV in pixels (real SW or LW pixels).
The defaults depend on the type of observation.
Odd number place the PSF on the center of the pixel,
whereas an even number centers it on the "crosshairs."
oversample : int
Factor to oversample during WebbPSF calculations.
Default 2 for coronagraphy and 4 otherwise.
include_si_wfe : bool
Include SI WFE measurements? Default=True.
include_distortions : bool
If True, will include a distorted version of the PSF.
pupil : str
File name or HDUList specifying telescope entrance pupil.
Can also be an OTE_Linear_Model.
pupilopd : tuple or HDUList
Tuple (file, index) or filename or HDUList specifying OPD.
Can also be an OTE_Linear_Model.
wfe_drift : float
Wavefront error drift amplitude in nm.
offset_r : float
Radial offset from the center in arcsec.
offset_theta :float
Position angle for radial offset, in degrees CCW.
bar_offset : float
For wedge masks, option to set the PSF position across the bar.
jitter : str or None
Currently either 'gaussian' or None.
jitter_sigma : float
If ``jitter = 'gaussian'``, then this is the size of the blurring effect.
npsf : int
Number of wavelengths/PSFs to fit.
ndeg : int
Degree of polynomial fit.
nproc : int
Manual setting of number of processor cores to break up PSF calculation.
If set to None, this is determined based on the requested PSF size,
number of available memory, and hardware processor cores. The automatic
calculation endeavors to leave a number of resources available to the
user so as to not crash the user's machine.
save : bool
Save the resulting PSF coefficients to a file? (default: True)
force : bool
Forces a recalcuation of PSF even if saved PSF exists. (default: False)
quick : bool
Only perform a fit over the filter bandpass with a lower default polynomial degree fit.
(default: True)
use_legendre : bool
Fit with Legendre polynomials, an orthonormal basis set. (default: True)
"""
update_coeffs = False
update_bg_coeffs = False
# filter, pupil mask, and image mask
if (filter is not None) and (filter != self.filter):
update_coeffs = True
update_bg_coeffs = True
self.filter = filter
if (pupil_mask is not None) and (pupil_mask != self.pupil_mask):
update_coeffs = True
update_bg_coeffs = True
if (pupil_mask.upper()=="CLEAR") or (pupil_mask.upper()=="NONE"):
pupil_mask = None
self.pupil_mask = pupil_mask
if (image_mask is not None) and (image_mask != self.image_mask):
update_coeffs = True
update_bg_coeffs = True
if (image_mask.upper()=="CLEAR") or (image_mask.upper()=="NONE"):
image_mask = None
self.image_mask = image_mask
if (fov_pix is not None) and (fov_pix != self.fov_pix):
update_coeffs = True
self.fov_pix = fov_pix
if (oversample is not None) and (oversample != self.oversample):
update_coeffs = True
self.oversample = oversample
# SI WFE and distortions
if (include_si_wfe is not None) and (include_distortions != self.include_distortions):
update_coeffs = True
self.include_si_wfe = include_si_wfe
if (include_distortions is not None) and (include_distortions != self.include_distortions):
update_coeffs = True
self.include_distortions = include_distortions
# Pupil OPD information
if (pupil is not None) and (self.pupil != pupil):
update_coeffs = True
self.pupil = pupil
if (pupilopd is not None) and (self.pupilopd != pupilopd):
update_coeffs = True
self.pupilopd = pupilopd
# Source and mask offsetting
if (offset_r is not None) and (self.options.get('source_offset_r') != offset_r):
update_coeffs = True
self.options['source_offset_r'] = offset_r
if (offset_theta is not None) and (self.options.get('source_offset_theta') != offset_theta):
update_coeffs = True
self.options['source_offset_theta'] = offset_theta
if (bar_offset is not None) and (self.options.get('bar_offset') != bar_offset):
update_coeffs = True
self.options['bar_offset'] = bar_offset
# Jitter
if (jitter is not None) and (self.options.get('jitter') != jitter):
update_coeffs = True
self.options['jitter'] = jitter
if (jitter_sigma is not None) and (self.options.get('jitter_sigma') != jitter_sigma):
update_coeffs = True
self.options['jitter_sigma'] = jitter_sigma
# Misecellaneous
if (npsf is not None) and (self.npsf != npsf):
update_coeffs = True
self.npsf = npsf
if (ndeg is not None) and (self.ndeg != ndeg):
update_coeffs = True
self.ndeg = ndeg
if (quick is not None) and (self.quick != quick):
update_coeffs = True
self.quick = quick
if (use_legendre is not None) and (self.use_legendre != use_legendre):
update_coeffs = True
self.use_legendre = use_legendre
# Detector update
if detector is not None:
update_coeffs = True
self.detector = get_detname(detector)
self.update_detectors()
# Regenerate PSF coefficients
if update_coeffs:
del self.psf_coeff, self.psf_coeff_header
save = True if save is None else save
self.gen_psf_coeff(save=save, force=force, nproc=nproc, **kwargs)
# Update drift, field, and mask-dependent coefficients
if self._psf_coeff_mod['wfe_drift'] is not None:
self.gen_wfedrift_coeff()
if self._psf_coeff_mod['si_field'] is not None:
self.gen_wfefield_coeff()
if self._psf_coeff_mod['si_mask'] is not None:
self.gen_wfemask_coeff()
# Update bg class if filter or pupil mask is changed
if update_bg_coeffs:
self._update_bg_class()
@property
def psf_info(self):
"""PSF parameters"""
d_options = self.options
d = {
'fov_pix': self.fov_pix, 'oversample': self.oversample,
'npsf': self.npsf, 'ndeg': self.ndeg, 'include_si_wfe': self.include_si_wfe,
'include_distortions': self.include_distortions,
'jitter': d_options.get('jitter'), 'jitter_sigma': d_options.get('jitter_sigma'),
'offset_r': d_options.get('source_offset_r', 0), 'offset_theta': d_options.get('source_offset_theta', 0),
'bar_offset': d_options.get('bar_offset', None),
'pupil': self.pupil, 'pupilopd': self.pupilopd,
}
return d
@property
def multiaccum(self):
""":class:`multiaccum` object"""
return self.Detector.multiaccum
@property
def multiaccum_times(self):
"""Exposure timings in dictionary
t_frame : Time of a single frame.
t_group : Time of a single group (read frames + drop frames).
t_int : Photon collection time for a single ramp/integration.
t_int_tot1: Total time for all frames (reset+read+drop) in a first ramp.
t_int_tot2: Total time for all frames (reset+read+drop) in a subsequent ramp.
t_exp : Total photon collection time for all ramps.
t_acq : Total acquisition time to complete exposure with all overheads.
"""
return self.Detector.times_to_dict()
@property
def det_info(self):
"""Dictionary housing detector info parameters and keywords."""
return self._det_info
@property
def well_level(self):
"""Detector well level in units of electrons"""
return self.Detector.well_level
@property
def siaf_ap_names(self):
"""Give all possible SIAF aperture names"""
return list(self.siaf.apernames)
def get_siaf_apname(self):
"""Get SIAF aperture based on instrument settings"""
# Return already defined ap name
# if (self.siaf_ap is not None) and (not override):
# return self.siaf_ap.AperName
# else:
detid = self.Detector.detid
wind_mode = self.Detector.wind_mode
is_lyot = self.is_lyot
is_coron = self.is_coron
is_grism = self.is_grism
pupil_mask = self.pupil_mask
if self.channel=='long' or self.channel=='LW':
channel = 'LW'
else:
channel = 'SW'
# Time series filters
ts_filters = ['F277W','F356W','F444W','F322W2']
# Coronagraphic bar filters
swb_filters = ['F182M','F187N','F210M','F212N','F200W']
lwb_filters = [
'F250M','F300M','F277W','F335M','F360M',
'F356W','F410M','F430M','F460M','F480M','F444W'
]
# Coronagraphy
if is_coron:
wstr = 'FULL_' if wind_mode=='FULL' else ''
key = 'NRC{}_{}{}'.format(detid,wstr,self.image_mask)
if ('WB' in self.image_mask) and (self.module=='A') and (self.filter in swb_filters+lwb_filters):
key = key + '_{}'.format(self.filter)
if wind_mode=='STRIPE':
key = None
# Just Lyot stop without masks, assuming TA aperture
elif is_lyot: #and self.ND_acq:
tastr = 'TA' if self.ND_acq else 'FSTA'
key = 'NRC{}_{}'.format(detid,tastr)
if ('CIRC' in pupil_mask) and ('SW' in channel):
key = key + 'MASK210R'
elif ('CIRC' in pupil_mask) and ('LW' in channel):
key = key + 'MASK430R' if ('F4' in self.filter) else key + 'MASK335R'
elif ('WEDGE' in pupil_mask) and ('SW' in channel):
key = key + 'MASKSWB'
elif ('WEDGE' in pupil_mask) and ('LW' in channel):
key = key + 'MASKLWB'
# Time series grisms
elif is_grism and ('GRISMR' in pupil_mask) and (self.filter in ts_filters):
if wind_mode=='FULL':
key = f'NRC{detid}_GRISM_{self.filter}'
elif wind_mode=='STRIPE':
key = 'NRC{}_GRISM{}_{}'.format(detid,self.det_info['ypix'],self.filter)
else:
key = None
# SW Time Series with LW grism
elif wind_mode=='STRIPE':
key = 'NRC{}_GRISMTS{:.0f}'.format(detid,self.det_info['ypix'])
# WFSS
elif is_grism and (wind_mode=='FULL'):
key = 'NRC{}_FULL_{}_WFSS'.format(detid, pupil_mask)
# Subarrays
elif wind_mode=='WINDOW':
key = 'NRC{}_SUB{}P'.format(detid,self.det_info['xpix'])
if key not in self.siaf_ap_names:
key = 'NRC{}_TAPSIMG{}'.format(detid,self.det_info['xpix'])
if key not in self.siaf_ap_names:
key = 'NRC{}_TAGRISMTS{}'.format(detid,self.det_info['xpix'])
if key not in self.siaf_ap_names:
key = 'NRC{}_TAGRISMTS_SCI_{}'.format(detid,self.filter)
if key not in self.siaf_ap_names:
key = 'NRC{}_SUB{}'.format(detid,self.det_info['xpix'])
# Full frame generic
elif wind_mode=='FULL':
key = 'NRC{}_FULL'.format(detid)
else:
key = None
# Check if key exists
if key in self.siaf_ap_names:
_log.info('Suggested SIAF aperture name: {}'.format(key))
return key
else:
_log.warning("Suggested SIAF aperture name '{}' is not defined".format(key))
return None
def get_subarray_name(self, apname=None):
"""Get JWST NIRCam subarray name"""
if apname is None:
apname = self.get_siaf_apname()
pupil_mask = self.pupil_mask
image_mask = self.image_mask
module = self.module
detid = self.Detector.detid
wind_mode = self.Detector.wind_mode
ypix = self.det_info['ypix']
is_lyot = self.is_lyot
is_coron = self.is_coron
is_grism = self.is_grism
is_ndacq = self.ND_acq
if 'FULL' in wind_mode:
subarray_name = 'FULLP' if apname[-1] == 'P' else 'FULL'
elif 'STRIPE' in wind_mode:
subarray_name = f'SUBGRISM{ypix}'
elif is_coron:
sub_str = f'SUB{ypix}'
mask_str = image_mask[4:]
if ('335R' in image_mask) and (module == 'A'):
subarray_name = sub_str + module
else:
subarray_name = sub_str + module + mask_str
# Just Lyot stop without masks, assuming TA aperture
elif is_lyot:
mask_str = image_mask[4:]
# Faint source TA
if not is_ndacq:
subarray_name = 'SUBFS' + module + mask_str
elif 'LWB' in image_mask: # ND TA
if 'LWBL' in apname:
subarray_name = 'SUBND' + module + 'LWBL'
else:
subarray_name = 'SUBND' + module + 'LWBS'
elif 'SWB' in image_mask: # ND TA
if 'SWBS' in apname:
subarray_name = 'SUBND' + module + 'LWBS'
else:
subarray_name = 'SUBND' + module + 'LWBL'
else:
subarray_name = 'SUBND' + module + mask_str
else:
subarray_name = f'SUB{ypix}P' if apname[-1] == 'P' else f'SUB{ypix}'
# TODO: Grism TS TA, Fine phasing (FP), and DHS
return subarray_name
def update_from_SIAF(self, apname, pupil_mask=None, **kwargs):
"""Update detector properties based on SIAF aperture"""
if apname is None:
_log.warn('update_from_SIAF: Input apname was None. Returning...')
return
if not (apname in self.siaf_ap_names):
# raise ValueError(f'Cannot find {apname} in siaf.apernames list.')
_log.warn(f'update_from_SIAF: Cannot find {apname} in siaf.apernames list. Returing...')
return
if ('NRCALL' in apname) or ('NRCAS' in apname) or ('NRCBS' in apname):
raise ValueError(f'{apname} is not valid. Single detector apertures only.')
# Convert SCA name to detector ID
scaname = apname[0:5]
module = scaname[3]
channel = 'LW' if scaname[-1]=='5' else 'SW'
detid = 480 + int(scaname[4]) if module=='A' else 485 + int(scaname[4])
siaf_ap = self.siaf[apname]
xpix = int(siaf_ap.XSciSize)
ypix = int(siaf_ap.YSciSize)
if (xpix >= 2048) and (ypix>=2048):
wind_mode = 'FULL'
elif (xpix >= 2048):
wind_mode = 'STRIPE'
else:
wind_mode = 'WINDOW'
# Get lower left corner from siaf info
# This is in full frame detector coordinates
x0, y0 = np.array(siaf_ap.dms_corner()) - 1
# Update pupil and mask info
image_mask = None
ND_acq = False
filter = None
# Coronagraphic mask observations
if 'MASK' in apname:
# Set default pupil
if pupil_mask is None:
pupil_mask = 'WEDGELYOT' if 'WB' in apname else 'CIRCLYOT'
# Set mask occulter for all full arrays (incl. TAs) and science subarrays
# Treats full array TAs like a full coronagraphic observation
if ('FULL' in apname) or ('_MASK' in apname):
if ('MASKSWB' in apname):
image_mask = 'MASKSWB'
elif ('MASKLWB' in apname):
image_mask = 'MASKLWB'
elif ('MASK210R' in apname):
image_mask = 'MASK210R'
elif ('MASK335R' in apname):
image_mask = 'MASK335R'
elif ('MASK430R' in apname):
image_mask = 'MASK430R'
if 'TA' in apname:
_log.info('Full TA apertures are treated similar to coronagraphic observations.')
_log.info("To calculate SNR, self.update_psf_coeff(image_mask='CLEAR') and set self.ND_acq.")
elif '_TAMASK' in apname:
# For small TA subarray, turn off mask and enable ND square
image_mask = None
ND_acq = True
elif '_FSTAMASK in apname':
# Not really anything to do here
image_mask = None
else:
_log.warn(f'No mask setting for {apname}')
# Grism observations
elif 'GRISM' in apname:
if ('_GRISMC' in apname): # GRISMC WFSS
pupil_mask = 'GRISMC' if pupil_mask is None else pupil_mask
elif ('_GRISMR' in apname): # GRISMR WFSS
pupil_mask = 'GRISMR' if pupil_mask is None else pupil_mask
elif ('_GRISMTS' in apname): # SW apertures in parallel w/ LW GRISMTS
pupil_mask = 'WLP8' if pupil_mask is None else pupil_mask
elif ('_TAGRISMTS' in apname): # GRISM TA have no pupil
pupil_mask = None
elif ('_GRISM' in apname): # Everything else is GRISMR
pupil_mask = 'GRISMR' if pupil_mask is None else pupil_mask
else:
_log.warn(f'No grism setting for {apname}')
# Look for filter specified in aperture name
if ('_F1' in apname) or ('_F2' in apname) or ('_F3' in apname) or ('_F4' in apname):
# Find all instances of "_"
inds = [pos for pos, char in enumerate(apname) if char == '_']
# Filter is always appended to end, but can have different string sizes (F322W2)
filter = apname[inds[-1]+1:]
# Save to internal variables
self.pupil_mask = pupil_mask
self.image_mask = image_mask
self._ND_acq = ND_acq
# Filter stuff
# Defaults
fsw_def, flw_def = ('F210M', 'F430M')
if filter is not None:
self.filter = filter
try:
if self._filter is None:
self._filter = fsw_def if 'SW' in channel else flw_def
except AttributeError:
self._filter = fsw_def if 'SW' in channel else flw_def
# If filter doesn't make sense with channel
if channel=='SW' and self._filter not in self._filters_sw:
self._filter = fsw_def
if channel=='LW' and self._filter not in self._filters_lw:
self._filter = flw_def
self._validate_wheels()
# Update detector
det_kwargs = {'xpix': xpix, 'ypix': ypix, 'x0': x0, 'y0': y0, 'wind_mode':wind_mode}
kwargs = merge_dicts(kwargs, det_kwargs)
self.detector = get_detname(scaname)
self.update_detectors(**kwargs)
# Update aperture
self.siaf_ap = siaf_ap
def calc_psf_from_coeff(self, sp=None, return_oversample=True, return_hdul=True,
wfe_drift=None, coord_vals=None, coord_frame='tel', use_bg_psf=False, **kwargs):
kwargs['sp'] = sp
kwargs['return_oversample'] = return_oversample
kwargs['return_hdul'] = return_hdul
kwargs['wfe_drift'] = wfe_drift
kwargs['coord_vals'] = coord_vals
kwargs['coord_frame'] = coord_frame
if use_bg_psf:
return self._nrc_bg.calc_psf_from_coeff(**kwargs)
else:
return super().calc_psf_from_coeff(**kwargs)
def calc_psf(self, sp=None, return_oversample=True, return_hdul=True,
wfe_drift=None, coord_vals=None, coord_frame='tel', use_bg_psf=False,
**kwargs):
kwargs['sp'] = sp
kwargs['return_oversample'] = return_oversample
kwargs['return_hdul'] = return_hdul
kwargs['wfe_drift'] = wfe_drift
kwargs['coord_vals'] = coord_vals
kwargs['coord_frame'] = coord_frame
_log.info("Calculating PSF from WebbPSF parent function")
log_prev = conf.logging_level
setup_logging('WARN', verbose=False)
if use_bg_psf:
res = self._nrc_bg.calc_psf(**kwargs)
else:
res = super().calc_psf(**kwargs)
setup_logging(log_prev, verbose=False)
return res
def sat_limits(self, sp=None, bp_lim=None, units='vegamag', well_frac=0.8,
ngroup=None, trim_psf=33, verbose=False, **kwargs):
"""Saturation limits.
Generate the limiting magnitude (80% saturation) with the current instrument
parameters (filter and ramp settings) assuming some spectrum. If no spectrum
is defined, then a G2V star is assumed.
The user can also define a separate bandpass in which to determine the
limiting magnitude that will cause the current NIRCam bandpass to saturate.
Parameters
----------
sp : :mod:`pysynphot.spectrum`
Spectrum to determine saturation limit.
bp_lim : :mod:`pysynphot.obsbandpass`
Bandpass to report limiting magnitude.
units : str
Output units (defaults to vegamag).
well_frac : float
Fraction of full well to consider 'saturated'.
ngroup : int, None
Option to specify the number of groups to determine
integration time. If not set, then the default is to
use those specified in the Detectors class. Can set
ngroup=0 for the so-called Zero Frame in the event
there are multiple reads per group.
trim_psf : int, None
Option to crop the PSF coefficient around the brightest pixel.
For PSFs with large `fov_pix` values, this option helps speed
up the saturation limit calculation. Afterall, we're usually
only interested in the brightest pixel when calculating
saturation limits. Set to `None` to use the 'fov_pix' value.
Default = 33 (detector pixels).
verbose : bool
Print result details.
Example
-------
>>> nrc = pynrc.NIRCam('F430M') # Initiate NIRCam observation
>>> sp_A0V = pynrc.stellar_spectrum('A0V') # Define stellar spectral type
>>> bp_k = S.ObsBandpass('steward,k') # Pysynphot K-Band bandpass
>>> bp_k.name = 'K-Band'
>>> mag_lim = nrc.sat_limits(sp_A0V, bp_k, verbose=True)
Returns K-Band Limiting Magnitude for F430M assuming A0V source.
"""
from webbpsf_ext.psfs import gen_image_from_coeff
from copy import deepcopy
bp_lim = self.bandpass if bp_lim is None else bp_lim
quiet = False if verbose else True
# Total time spent integrating minus the reset frame
if ngroup is None:
t_sat = self.multiaccum_times['t_int']
else:
t_frame = self.multiaccum_times['t_frame']
if ngroup==0:
t_sat = t_frame
else:
ma = self.multiaccum
nf = ma.nf; nd1 = ma.nd1; nd2 = ma.nd2
t_sat = (nd1 + ngroup*nf + (ngroup-1)*nd2) * t_frame
# Full well level
well_level = self.well_level
# kwargs = merge_dicts(kwargs, self._psf_info)
# We don't necessarily need the entire image, so cut down to size
# 1. Create a temporary image at bp avg wavelength (monochromatic)
# 2. Find x,y position of max PSF
# 3. Cut out postage stamp region around that PSF coeff
psf_coeff = self.psf_coeff
psf_coeff_hdr = deepcopy(self.psf_coeff_header)
fov_pix, osamp = (psf_coeff_hdr['FOVPIX'], psf_coeff_hdr['OSAMP'])
if (trim_psf is not None) and (trim_psf < fov_pix):
# Quickly create a temporary PSF to find max value location
wtemp = np.array([bp_lim.wave[0], bp_lim.avgwave(), bp_lim.wave[-1]])
ttemp = np.array([bp_lim.sample(w) for w in wtemp])
bptemp = S.ArrayBandpass(wave=wtemp, throughput=ttemp)
# psf_temp, psf_temp_over = gen_image_coeff(bptemp, coeff=psf_coeff, coeff_hdr=psf_coeff_hdr, \
# fov_pix=fov_pix, oversample=osamp, return_oversample=True)
res = gen_image_from_coeff(self, psf_coeff, psf_coeff_hdr, nwaves=3, return_oversample=True)
if self.is_grism:
_, psf_temp_over = res
else:
psf_temp_over = res
# Amount to shift PSF
yind, xind = np.argwhere(psf_temp_over==psf_temp_over.max())[0]
ypix, xpix = psf_temp_over.shape
ysh = int(yind - ypix/2)
xsh = int(xind - xpix/2)
fov_pix_over = trim_psf * osamp
coeff = []
for im in psf_coeff:
im = fshift(im, -xsh, -ysh, interp='cubic')
im = pad_or_cut_to_size(im, (fov_pix_over,fov_pix_over))
coeff.append(im)
psf_coeff = np.array(coeff)
psf_coeff_hdr['FOVPIX'] = trim_psf
satlim = saturation_limits(self, psf_coeff=psf_coeff, psf_coeff_hdr=psf_coeff_hdr, sp=sp, units=units,
bp_lim=bp_lim, int_time=t_sat, full_well=well_level, well_frac=well_frac,
verbose=verbose, **kwargs)
return satlim
def saturation_levels(self, sp, full_size=True, ngroup=2, image=None, **kwargs):
""" Saturation levels
Create image showing level of saturation for each pixel.
Can either show the saturation after one frame (default)
or after the ramp has finished integrating (ramp_sat=True).
Parameters
----------
sp : :mod:`pysynphot.spectrum`
A pysynphot spectral object (normalized).
full_size : bool
Expand (or contract) to size of detector array?
If False, use fov_pix size.
ngroup : int
How many group times to determine saturation level?
If this number is higher than the total groups in ramp,
then a warning is produced. The default is ngroup=2,
A value of 0 corresponds to the so-called "zero-frame,"
which is the very first frame that is read-out and saved
separately. This is the equivalent to ngroup=1 for RAPID
and BRIGHT1 observations.
image : ndarray
Rather than generating an image on the fly, pass a pre-computed
slope image. Overrides `sp` and `full_size`
"""
assert ngroup >= 0
is_grism = self.is_grism
t_frame = self.multiaccum_times['t_frame']
t_int = self.multiaccum_times['t_int']
if ngroup==0:
t_sat = t_frame
else:
ma = self.multiaccum
nf = ma.nf; nd1 = ma.nd1; nd2 = ma.nd2
t_sat = (nd1 + ngroup*nf + (ngroup-1)*nd2) * t_frame
if t_sat>t_int:
_log.warning('ngroup*t_group is greater than t_int.')
# Slope image of input
if image is not None:
return image * t_sat / self.well_level
else:
image = self.calc_psf_from_coeff(sp=sp, return_oversample=False, return_hdul=False)
if is_grism:
wave, image = image
if full_size:
shape = (self.det_info['ypix'], self.det_info['xpix'])
image = pad_or_cut_to_size(image, shape)
# Add in zodi background to full image
image += self.bg_zodi(**kwargs)
# Well levels after "saturation time"
sat_level = image * t_sat / self.well_level
if is_grism:
return (wave, sat_level)
else:
return sat_level
def sensitivity(self, nsig=10, units=None, sp=None, verbose=False, **kwargs):
"""Sensitivity limits.
Convenience function for returning the point source (and surface brightness)
sensitivity for the given instrument setup. See `sensitivities` function
for more details.
Parameters
----------
nsig : int, float
Desired nsigma sensitivity (default 10).
units : str
Output units (defaults to uJy for grisms, nJy for imaging).
sp : :mod:`pysynphot.spectrum`
Input spectrum to use for determining sensitivity.
Only the spectral shape matters, unless ``forwardSNR=True``.
verbose : bool
Print result details.
Keyword Args
------------
forwardSNR : bool
Find the SNR of the input spectrum instead of sensitivity.
zfact : float
Factor to scale Zodiacal spectrum (default 2.5)
ideal_Poisson : bool
If set to True, use total signal for noise estimate,
otherwise MULTIACCUM equation is used.
rad_EE : float
Extraction aperture radius (in pixels) for imaging mode.
dw_bin : float
Delta wavelength for spectral sensitivities (grisms & DHS).
ap_spec : int, float
Instead of dw_bin, specify the spectral extraction aperture in pixels.
Takes priority over dw_bin. Value will get rounded up to nearest int.
"""
tf = self.multiaccum_times['t_frame']
det = self.Detector
ktc = det.ktc
rn = det.read_noise
idark = det.dark_current
p_excess = det.p_excess
pupil_mask = '' if self.pupil_mask is None else self.pupil_mask
kw1 = self.multiaccum.to_dict()
kw2 = {'rn':rn, 'ktc':ktc, 'idark':idark, 'p_excess':p_excess}
kwargs = merge_dicts(kwargs,kw1,kw2)
if 'ideal_Poisson' not in kwargs.keys():
kwargs['ideal_Poisson'] = True
# Always use the bg coeff
psf_coeff = self._nrc_bg.psf_coeff
psf_coeff_hdr = self._nrc_bg.psf_coeff_header.copy()
fov_pix, osamp = (psf_coeff_hdr['FOVPIX'], psf_coeff_hdr['OSAMP'])
# We don't necessarily need the entire image, so cut down to size for speed
if (not ('WEAK LENS' in pupil_mask)) and (fov_pix > 33):
fov_pix = 33
fov_pix_over = fov_pix * osamp
psf_coeff = np.array([pad_or_cut_to_size(im, (fov_pix_over,fov_pix_over)) for im in psf_coeff])
kwargs['fov_pix'] = fov_pix
psf_coeff_hdr['FOVPIX'] = fov_pix
bglim = sensitivities(self, psf_coeff=psf_coeff, psf_coeff_hdr=psf_coeff_hdr,
sp=sp, units=units, nsig=nsig, tf=tf, verbose=verbose, **kwargs)
return bglim
def bg_zodi(self, zfact=None, **kwargs):
"""Zodiacal background flux.
There are options to call `jwst_backgrounds` to obtain better
predictions of the background. Specify keywords `ra`, `dec`,
and `thisday` to use `jwst_backgrounds`.
Returned values are in units of e-/sec/pixel
Parameters
----------
zfact : float
Factor to scale Zodiacal spectrum (default 2.5)
Keyword Args
------------
ra : float
Right ascension in decimal degrees
dec : float
Declination in decimal degrees
thisday : int
Calendar day to use for background calculation.
If not given, will use the average of visible calendar days.
Notes
-----
Representative values for zfact:
* 0.0 - No zodiacal emission
* 1.0 - Minimum zodiacal emission from JWST-CALC-003894
* 1.2 - Required NIRCam performance
* 2.5 - Average (default)
* 5.0 - High
* 10.0 - Maximum
"""
# Dark image
if self.is_dark:
return 0
bp = self.bandpass
waveset = bp.wave
sp_zodi = zodi_spec(zfact, **kwargs)
obs_zodi = S.Observation(sp_zodi, bp, waveset)
fzodi_pix = obs_zodi.countrate() * (self.pixelscale/206265.0)**2
# Recommend a zfact value if ra, dec, and thisday specified
if 'ra' in kwargs.keys():
sp_zodi_temp = zodi_spec(zfact=1)
obs_zodi_temp = S.Observation(sp_zodi_temp, bp, waveset)
fzodi_pix_temp = obs_zodi_temp.countrate() * (self.pixelscale/206265.0)**2
zf_rec = fzodi_pix / fzodi_pix_temp
str1 = 'Using ra,dec,thisday keywords can be relatively slow. \n'
str2 = '\tFor your specified loc and date, we recommend using zfact={:.1f}'.format(zf_rec)
_log.warn(str1 + str2)
# Don't forget about Lyot mask attenuation (not in bandpass throughput)
if self.is_lyot:
fzodi_pix *= 0.19
return fzodi_pix
def bg_zodi_image(self, zfact=None, frame='sci', **kwargs):
"""Zodiacal light image
Returns an image of background Zodiacal light emission
in e-/sec in specified coordinate frame.
Parameters
----------
zfact : float
Factor to scale Zodiacal spectrum (default 2.5)
frame : str
Return in 'sci' or 'det' coordinates?
Keyword Args
------------
ra : float
Right ascension in decimal degrees
dec : float
Declination in decimal degrees
thisday : int
Calendar day to use for background calculation.
If not given, will use the average of visible calendar days.
Notes
-----
Representative values for zfact:
* 0.0 - No zodiacal emission
* 1.0 - Minimum zodiacal emission from JWST-CALC-003894
* 1.2 - Required NIRCam performance
* 2.5 - Average (default)
* 5.0 - High
* 10.0 - Maximum
"""
detid = self.Detector.detid
x0, y0 = (self.det_info['x0'], self.det_info['y0'])
xpix, ypix = (self.det_info['xpix'], self.det_info['ypix'])
# Dark image
if self.is_dark:
return np.zeros([ypix,xpix])
bp = self.bandpass
waveset = bp.wave
sp_zodi = zodi_spec(zfact, **kwargs)
obs_zodi = S.Observation(sp_zodi, bp, waveset)
fzodi_pix = obs_zodi.countrate() * (self.pixelscale/206265.0)**2
# Get equivalent
if 'ra' in kwargs.keys():
sp_zodi_temp = zodi_spec(zfact=1)
obs_zodi_temp = S.Observation(sp_zodi_temp, bp, waveset)
fzodi_pix_temp = obs_zodi_temp.countrate() * (self.pixelscale/206265.0)**2
zfact = fzodi_pix / fzodi_pix_temp
_ = kwargs.pop('ra')
_ = kwargs.pop('dec')
_ = kwargs.pop('thisday')
filter = self.filter
pupil_mask = self.pupil_mask
if self.is_grism:
# sci coords
im_bg = grism_background_image(filter, pupil=pupil_mask, module=self.module, sp_bg=sp_zodi, **kwargs)
# Convert to det coords and crop
im_bg = sci_to_det(im_bg, detid)
im_bg = im_bg[y0:y0+ypix, x0:x0+xpix]
# Back to sci coords
im_bg = det_to_sci(im_bg, detid)
elif self.is_coron or self.coron_substrate:
# Create full image, then crop based on detector configuration
im_bg = build_mask_detid(detid, oversample=1, pupil=pupil_mask, filter=self.filter)
if im_bg is None:
# In the event the specified detid has no coronagraphic mask
# This includes ['A1', 'A3', 'B2', 'B4']
im_bg = np.ones([ypix,xpix])
else:
# Convert to det coords and crop
im_bg = sci_to_det(im_bg, detid)
im_bg = im_bg[y0:y0+ypix, x0:x0+xpix]
# Back to sci coords and multiply by e-/sec/pix
im_bg = det_to_sci(im_bg, detid)
# Multiply by e-/sec/pix
im_bg *= self.bg_zodi(zfact, **kwargs)
else:
# No spatial structures for direct imaging an certain Lyot masks.
im_bg = np.ones([ypix,xpix]) * self.bg_zodi(zfact, **kwargs)
# Clear reference pixels
# im_bg = sci_to_det(im_bg, detid)
# mask_ref = self.Detector.mask_ref
# im_bg[mask_ref] = 0
# im_bg = det_to_sci(im_bg, detid)
if frame=='det':
return sci_to_det(im_bg, detid)
elif frame=='sci':
return im_bg
else:
raise ValueError(f"frame {frame} not recognized. Use either 'sci' or 'det'.")
def ramp_optimize(self, sp, sp_bright=None, is_extended=False, patterns=None,
snr_goal=None, snr_frac=0.02, tacq_max=None, tacq_frac=0.1,
well_frac_max=0.8, nint_min=1, nint_max=5000, ng_min=2, ng_max=None,
return_full_table=False, even_nints=False, verbose=False, **kwargs):
"""Optimize ramp settings.
Find the optimal ramp settings to observe a spectrum based on input constraints.
This function quickly runs through each detector readout pattern and
calculates the acquisition time and SNR for all possible settings of NINT
and NGROUP that fulfill the SNR requirement (and other constraints).
The final output table is then filtered, removing those exposure settings
that have the same exact acquisition times but worse SNR. Further "obvious"
comparisons are done that exclude settings where there is another setting
that has both better SNR and less acquisition time. The best results are
then sorted by an efficiency metric (SNR / sqrt(acq_time)). To skip filtering
of results, set return_full_table=True.
The result is an AstroPy Table.
Parameters
----------
sp : :mod:`pysynphot.spectrum`
A pysynphot spectral object to calculate SNR.
sp_bright : :mod:`pysynphot.spectrum`, None
Same as sp, but optionally used to calculate the saturation limit
(treated as brightest source in field). If a coronagraphic mask
observation, then this source is assumed to be occulted and
sp is fully unocculted.
is_extended : bool
Treat sp source as extended object, then in units/arcsec^2
snr_goal : float
Minimum required SNR for source. For grism, this is the average
SNR for all wavelength.
snr_frac : float
Give fractional buffer room rather than strict SNR cut-off.
tacq_max : float
Maximum amount of acquisition time in seconds to consider.
tacq_frac : float
Fractional amount of time to consider exceeding tacq_max.
patterns : numpy array
Subset of MULTIACCUM patterns to check, otherwise check all.
nint_min/max : int
Min/max number of desired integrations.
ng_min/max : int
Min/max number of desired groups in a ramp.
well_frac_max : float
Maximum level that the pixel well is allowed to be filled.
Fractions greater than 1 imply hard saturation, but the reported
SNR will not be aware of any saturation that may occur to sp.
even_nints : bool
Return only the even NINTS
return_full_table : bool
Don't filter or sort the final results (ingores event_ints).
verbose : bool
Prints out top 10 results.
Keyword Args
------------
zfact : float
Factor to scale Zodiacal spectrum (default 2.5)
ra : float
Right ascension in decimal degrees
dec : float
Declination in decimal degrees
thisday : int
Calendar day to use for background calculation. If not given, will use the
average of visible calendar days.
ideal_Poisson : bool
Use total signal for noise estimate?
Otherwise MULTIACCUM equation is used.
Default = True
rad_EE : int
Extraction aperture radius (in pixels) for imaging mode.
dw_bin : float
Delta wavelength to calculate spectral sensitivities for
grisms and DHS.
ap_spec : float, int
Instead of dw_bin, specify the spectral extraction aperture
in pixels. Takes priority over dw_bin. Value will get rounded
up to nearest int.
Note
----
The keyword arguments ra, dec, thisday are not recommended for use
given the amount of time it takes to query the web server.
Instead, use :meth:`bg_zodi` to match a zfact estimate.
Returns
-------
astropy table
A sorted and filtered table of ramp options.
"""
def parse_snr(snr, grism_obs, ind_snr):
if grism_obs:
res = snr['snr']
return np.median(res)
else:
return snr[ind_snr]['snr']
pupil_mask = self.pupil_mask
grism_obs = self.is_grism
dhs_obs = (pupil_mask is not None) and ('DHS' in pupil_mask)
det_params_orig = self.det_info.copy()
if dhs_obs:
raise NotImplementedError('DHS has yet to be fully included.')
if grism_obs and is_extended:
raise NotImplementedError('Extended objects not implemented for grism observations.')
if (snr_goal is not None) and (tacq_max is not None):
raise ValueError('Keywords snr_goal and tacq_max are mutually exclusive.')
if (snr_goal is None) and (tacq_max is None):
raise ValueError('Must set either snr_goal or tacq_max.')
# Brightest source in field
if sp_bright is None:
sp_bright = sp
gen_psf = self.calc_psf_from_coeff
kw_gen_psf = {'return_oversample': False,'return_hdul': False}
# Generate PSFs for faint and bright objects and get max pixel flux
# Only necessary for point sources
if is_extended:
ind_snr = 1
obs = S.Observation(sp, self.bandpass, binset=self.bandpass.wave)
psf_faint = obs.countrate() * self.pixelscale**2
psf_bright = gen_psf(sp=sp_bright, use_bg_psf=False, **kw_gen_psf)
pix_count_rate = np.max([psf_bright.max(), psf_faint])
else:
ind_snr = 0
if grism_obs:
_, psf_bright = gen_psf(sp=sp_bright, use_bg_psf=False, **kw_gen_psf)
_, psf_faint = gen_psf(sp=sp, use_bg_psf=True, **kw_gen_psf)
else:
psf_bright = gen_psf(sp=sp_bright, use_bg_psf=False, **kw_gen_psf)
psf_faint = gen_psf(sp=sp, use_bg_psf=True, **kw_gen_psf)
pix_count_rate = np.max([psf_bright.max(), psf_faint.max()])
image = self.sensitivity(sp=sp, forwardSNR=True, return_image=True, **kwargs)
# Correctly format patterns
pattern_settings = self.multiaccum._pattern_settings
if patterns is None:
patterns = list(pattern_settings.keys())
if not isinstance(patterns, list):
patterns = [patterns]
m = np.zeros(len(patterns))
s = np.zeros(len(patterns))
for i,patt in enumerate(patterns):
v1,v2,v3 = pattern_settings.get(patt)
m[i] = v1
s[i] = v2
# Sort by nf (m+s) then by m
isort = np.lexsort((m,m+s))
patterns = list(np.array(patterns)[isort])
patterns.sort()
log_prev = conf.logging_level
setup_logging("WARN", verbose=False)
rows = []
if tacq_max is not None:
# Cycle through each readout pattern
for read_mode in patterns:
if verbose: print(read_mode)
# Maximum allowed groups for given readout pattern
_,_,ngroup_max = pattern_settings.get(read_mode)
if ng_max is not None:
ngroup_max = ng_max
nng = ngroup_max - ng_min + 1
if nng>30:
_log.warning(f'Cycling through {nng} NGROUPs. This may take a while!')
for ng in range(ng_min,ngroup_max+1):
self.update_detectors(read_mode=read_mode, ngroup=ng, nint=1)
mtimes = self.multiaccum_times
# Get saturation level of observation
# Total time spent integrating minus the reset frame
int_time = mtimes['t_int']
well_frac = pix_count_rate * int_time / self.well_level
# If above well_frac_max, then this setting is invalid
# Also, all subsequent values of ng will be too high
# so just break out of for loop.
if well_frac > well_frac_max:
break
# Approximate integrations needed to obtain required t_acq
nint1 = int(((1-tacq_frac)*tacq_max) / mtimes['t_acq'])
nint2 = int(((1+tacq_frac)*tacq_max) / mtimes['t_acq'] + 0.5)
nint1 = np.max([nint1,nint_min])
nint2 = np.min([nint2,nint_max])
nint_all = np.arange(nint1, nint2+1)
narr = len(nint_all)
# Sometimes there are a lot of nint values to check
# Let's pair down to <5 per ng
if narr>5:
i1 = int(narr/2-2)
i2 = i1 + 5
nint_all = nint_all[i1:i2]
#print(len(nint_all))
for nint in nint_all:
if nint > nint_max:
break
self.update_detectors(nint=nint)
mtimes = self.multiaccum_times
sen = self.sensitivity(sp=sp, forwardSNR=True, image=image, **kwargs)
snr = parse_snr(sen, grism_obs, ind_snr)
rows.append((read_mode, ng, nint, mtimes['t_int'], mtimes['t_exp'], \
mtimes['t_acq'], snr, well_frac))
elif snr_goal is not None:
for i,read_mode in enumerate(patterns):
if verbose: print(read_mode)
# Maximum allowed groups for given readout pattern
_,_,ngroup_max = pattern_settings.get(read_mode)
if ng_max is not None:
ngroup_max = ng_max #np.min([ng_max,ngroup_max])
nng = ngroup_max - ng_min + 1
if nng>20:
_log.warning(f'Cycling through {nng} NGROUPs. This may take a while!')
ng_saved = False
for ng in range(ng_min,ngroup_max+1):
self.update_detectors(read_mode=read_mode, ngroup=ng, nint=1)
mtimes = self.multiaccum_times
# Get saturation level of observation
int_time = mtimes['t_int']
well_frac = pix_count_rate * int_time / self.well_level
# If above well_frac_max, then this setting is invalid
if well_frac > well_frac_max:
continue
# Get SNR (assumes no saturation)
sen = self.sensitivity(sp=sp, forwardSNR=True, image=image, **kwargs)
snr = parse_snr(sen, grism_obs, ind_snr)
# Approximate integrations needed to get to required SNR
nint = int((snr_goal / snr)**2)
nint = np.max([nint_min,nint])
if nint>nint_max:
continue
# Find NINT with SNR > 0.95 snr_goal
self.update_detectors(nint=nint)
mtimes = self.multiaccum_times
sen = self.sensitivity(sp=sp, forwardSNR=True, image=image, **kwargs)
snr = parse_snr(sen, grism_obs, ind_snr)
while (snr<((1-snr_frac)*snr_goal)) and (nint<=nint_max):
nint += 1
self.update_detectors(nint=nint)
mtimes = self.multiaccum_times
sen = self.sensitivity(sp=sp, forwardSNR=True, image=image, **kwargs)
snr = parse_snr(sen, grism_obs, ind_snr)
# Skip if NINT
if (nint > nint_max):# or :
continue
# We want to make sure that at least one NINT setting is saved
# if the resulting SNR is higher than our stated goal.
if (snr > ((1+snr_frac)*snr_goal)) and ng_saved:
continue
rows.append((read_mode, ng, nint, mtimes['t_int'], mtimes['t_exp'], \
mtimes['t_acq'], snr, well_frac))
ng_saved = True
# Increment NINT until SNR > 1.05 snr_goal
# Add each NINT to table output
while (snr < ((1+snr_frac)*snr_goal)) and (nint<=nint_max):
nint += 1
if (nint > nint_max): break # double-check
self.update_detectors(nint=nint)
sen = self.sensitivity(sp=sp, forwardSNR=True, image=image, **kwargs)
snr = parse_snr(sen, grism_obs, ind_snr)
mtimes = self.multiaccum_times
rows.append((read_mode, ng, nint, mtimes['t_int'], mtimes['t_exp'], \
mtimes['t_acq'], snr, well_frac))
# Return to detector mode to original parameters
self.update_detectors(**det_params_orig)
setup_logging(log_prev, verbose=False)
names = ('Pattern', 'NGRP', 'NINT', 't_int', 't_exp', 't_acq', 'SNR', 'Well')
if len(rows)==0:
_log.warning('No ramp settings allowed within constraints! Reduce constraints.')
return Table(names=names)
# Place rows into a AstroPy Table
t_all = Table(rows=rows, names=names)
t_all['Pattern'].format = '<10'
t_all['t_int'].format = '9.2f'
t_all['t_exp'].format = '9.2f'
t_all['t_acq'].format = '9.2f'
t_all['SNR'].format = '8.1f'
t_all['Well'].format = '8.3f'
t_all['eff'] = t_all['SNR'] / np.sqrt(t_all['t_acq'])
# Round to 3 sig digits
t_all['eff'] = (1000*t_all['eff']).astype(int) / 1000.
t_all['eff'].format = '8.3f'
# Filter table?
if return_full_table:
# Sort by efficiency, then acq time
ind_sort = np.lexsort((t_all['t_acq'],1/t_all['eff']))
t_all = t_all[ind_sort]
if verbose:
print("Top 10 results sorted by 'efficiency' [SNR/sqrt(t_acq)]:")
print(t_all[0:10])
else:
t_all = table_filter(t_all, **kwargs)
ind_sort = np.lexsort((t_all['t_acq'],1/t_all['eff']))
t_all = t_all[ind_sort]
# Select only even integrations
if even_nints:
ind = (t_all['NINT'] % 2 == 0)
t_all = t_all[ind]
if verbose: print(t_all)
return t_all
def gen_psfs_over_fov(self, sptype='G0V', wfe_drift=0, osamp=1, npsf_per_full_fov=15,
return_coords=None, use_coeff=True, **kwargs):
"""Create PSF grid over full field of view
Wrapper around `calc_psfs_grid` that returns normalized PSFs across
the field of view.
Create a grid of PSFs across instrument aperture FoV. By default,
imaging observations will be for full detector FoV with regularly
spaced grid. Coronagraphic observations will cover nominal
coronagraphic mask region (usually 10s of arcsec) and will have
logarithmically spaced values where appropriate.
Parameters
==========
sptype : str
Spectral type, such as 'A0V' or 'K2III'.
wfe_drift : float
Desired WFE drift value relative to default OPD.
osamp : int
Sampling of output PSF relative to detector sampling.
npsf_per_full_fov : int
Number of PSFs across one dimension of the instrument's field of
view. If a coronagraphic observation, then this is for the nominal
coronagrahic field of view.
return_coords : None or str
Option to also return coordinate values in desired frame
('det', 'sci', 'tel', 'idl'). Output is then xvals, yvals, hdul_psfs.
use_coeff : bool
If True, uses `calc_psf_from_coeff`, other WebbPSF's built-in `calc_psf`.
Keyword Args
============
xsci_vals: None or ndarray
Option to pass a custom grid values along x-axis in 'sci' coords.
If coronagraph, this instead corresponds to coronagraphic mask axis,
which has a slight rotation in MIRI.
ysci_vals: None or ndarray
Option to pass a custom grid values along y-axis in 'sci' coords.
If coronagraph, this instead corresponds to coronagraphic mask axis,
which has a slight rotation in MIRI.
"""
# Create input spectrum that is star normalized by unit response
bp = self.bandpass
sp = stellar_spectrum(sptype, bp.unit_response(), 'flam', bp)
return self.calc_psfs_grid(sp=sp, wfe_drift=wfe_drift, osamp=osamp,
return_coords=return_coords, use_coeff=use_coeff,
npsf_per_full_fov=npsf_per_full_fov, **kwargs)
def _gen_obs_params(self, target_name, ra, dec, date_obs, time_obs, pa_v3=0,
siaf_ap_ref=None, xyoff_idl=(0,0), visit_type='SCIENCE', time_series=False,
time_exp_offset=0, segNum=None, segTot=None, int_range=None, filename=None, **kwargs):
""" Generate a simple obs_params dictionary
An obs_params dictionary is used to create a jwst data model (e.g., Level1bModel).
Additional **kwargs will add/update elements to the final output dictionary.
Parameters
==========
ra : float
RA in degrees associated with observation pointing
dec : float
RA in degrees associated with observation pointing
data_obs : str
YYYY-MM-DD
time_obs : str
HH:MM:SS
Keyword Arg
===========
pa_v3 : float
Telescope V3 position angle.
siaf_ap_ref : pysiaf Aperture
SIAF aperture class used for telescope pointing (if different than self.siaf_ap)
xyoff_idl : tuple, list
(x,y) offset in arcsec ('idl' coords) to dither observation
visit_type : str
'T_ACQ', 'CONFIRM', or 'SCIENCE'
time_series : bool
Is this a time series observation?
time_exp_offset : float
Exposure start time (in seconds) relative to beginning of observation execution.
segNum : int
The segment number of the current product. Only for TSO.
segTot : int
The total number of segments. Only for TSO.
int_range : list
Integration indices to use
filename : str or None
Name of output filename. If set to None, then auto generates a dummy name.
"""
from .simul.apt import create_obs_params
from .simul.dms import DMS_filename
filt = self.filter
pupil = 'CLEAR' if self.pupil_mask is None else self.pupil_mask
mask = 'None' if self.image_mask is None else self.image_mask
det = self.Detector
siaf_ap_obs = self.siaf_ap
if siaf_ap_ref is None:
siaf_ap_ref = self.siaf_ap
ra_dec = (ra, dec)
kwargs['target_name'] = target_name
kwargs['nexposures'] = 1
obs_params = create_obs_params(filt, pupil, mask, det, siaf_ap_ref, ra_dec, date_obs, time_obs,
pa_v3=pa_v3, siaf_ap_obs=siaf_ap_obs, xyoff_idl=xyoff_idl, time_exp_offset=time_exp_offset,
visit_type=visit_type, time_series=time_series, segNum=segNum, segTot=segTot, int_range=int_range,
filename=filename, **kwargs)
if filename is None:
obs_id_info = obs_params['obs_id_info']
detname = det.detid
filename = DMS_filename(obs_id_info, detname, segNum=segNum, prodType='uncal')
obs_params['filename'] = filename
return obs_params
def simulate_ramps(self, sp=None, im_slope=None, cframe='sci', nint=None,
do_dark=False, rand_seed=None, **kwargs):
""" Simulate Ramp Data
Create a series of ramp data based on the current NIRCam settings.
This method calls the :func:`gen_ramp` function, which in turn calls
the detector noise generator :func:`~pynrc.simul.simulate_detector_ramp`.
Parameters
----------
im_slope : numpy array, None
Pass the slope image directly. If not set, then a slope
image will be created from the input spectrum keyword. This
should include zodiacal light emission, but not dark current.
Make sure this array is in detector coordinates.
sp : :mod:`pysynphot.spectrum`, None
A pysynphot spectral object. If not specified, then it is
assumed that we're looking at blank sky.
cframe : str
Output coordinate frame, 'sci' or 'det'.
nint : None or int
Options to specify arbitrary number of integrations.
do_dark : bool
Make a dark ramp (ie., pupil_mask='FLAT'), no external flux.
Keyword Args
------------
zfact : float
Factor to scale Zodiacal spectrum (default 2.5)
ra : float
Right ascension in decimal degrees
dec : float
Declination in decimal degrees
thisday : int
Calendar day to use for background calculation. If not given, will use the
average of visible calendar days.
return_full_ramp : bool
By default, we average groups and drop frames as specified in the
`det` input. If this keyword is set to True, then return all raw
frames within the ramp. The last set of `nd2` frames will be omitted.
out_ADU : bool
If true, divide by gain and convert to 16-bit UINT.
include_dark : bool
Add dark current?
include_bias : bool
Add detector bias?
include_ktc : bool
Add kTC noise?
include_rn : bool
Add readout noise per frame?
include_cpink : bool
Add correlated 1/f noise to all amplifiers?
include_upink : bool
Add uncorrelated 1/f noise to each amplifier?
include_acn : bool
Add alternating column noise?
apply_ipc : bool
Include interpixel capacitance?
apply_ppc : bool
Apply post-pixel coupling to linear analog signal?
include_refoffsets : bool
Include reference offsts between amplifiers and odd/even columns?
include_refinst : bool
Include reference/active pixel instabilities?
include_colnoise : bool
Add in column noise per integration?
col_noise : ndarray or None
Option to explicitly specifiy column noise distribution in
order to shift by one for subsequent integrations
amp_crosstalk : bool
Crosstalk between amplifiers?
add_crs : bool
Add cosmic ray events? See Robberto et al 2010 (JWST-STScI-001928).
cr_model: str
Cosmic ray model to use: 'SUNMAX', 'SUNMIN', or 'FLARES'.
cr_scale: float
Scale factor for probabilities.
apply_nonlinearity : bool
Apply non-linearity?
random_nonlin : bool
Add randomness to the linearity coefficients?
apply_flats: bool
Apply sub-pixel QE variations (crosshatching)?
latents : None or ndarray
(TODO) Apply persistence from previous integration.
"""
from .reduce.calib import nircam_cal
rng = np.random.default_rng(rand_seed)
det = self.Detector
nint = det.multiaccum.nint if nint is None else nint
pupil_mask = 'FLAT' if do_dark else self.pupil_mask
xpix = self.det_info['xpix']
ypix = self.det_info['ypix']
# Set logging to WARNING to suppress messages
log_prev = conf.logging_level
setup_logging('WARN', verbose=False)
det_cal_obj = nircam_cal(self.scaid, verbose=False)
# If requesting dark images
if do_dark:
im_slope = np.zeros([ypix,xpix])
# If slope image is not specified
elif im_slope is None:
# Detector sampled images
gen_psf = self.calc_psf_from_coeff
kw_gen_psf = {'return_oversample': False,'return_hdul': False}
# Imaging+Coronagraphy
if pupil_mask is None:
im_slope = gen_psf(sp=sp, **kw_gen_psf)
# No visible source
elif ('FLAT' in pupil_mask) or (sp is None):
im_slope = np.zeros([ypix,xpix])
# Grism spec
elif ('GRISM' in pupil_mask):
w, im_slope = gen_psf(sp=sp, **kw_gen_psf)
# DHS spectroscopy
elif ('DHS' in pupil_mask):
raise NotImplementedError('DHS has yet to be fully included')
# Imaging+Coronagraphy
else:
im_slope = gen_psf(sp=sp, **kw_gen_psf)
# Expand or cut to detector size
im_slope = pad_or_cut_to_size(im_slope, (ypix,xpix))
# Add in Zodi emission
# Returns 0 if self.pupil_mask='FLAT'
im_slope += self.bg_zodi_image(**kwargs)
# Minimum value of slope
im_min = im_slope[im_slope>=0].min()
# Expand or cut to detector size
im_slope = pad_or_cut_to_size(im_slope, (ypix,xpix))
# Make sure there are no negative numbers
im_slope[im_slope<=0] = im_min
# Create a list of arguments to pass
worker_arguments = []
for i in range(nint):
rseed_i = rng.integers(0,2**32-1)
kw = {'im_slope': im_slope, 'cframe': cframe,
'return_zero_frame': True, 'rand_seed': rseed_i}
kws = merge_dicts(kw, kwargs)
args = (det, det_cal_obj)
worker_arguments.append((args, kws))
res_zeros = []
res_ramps = []
for wa in tqdm(worker_arguments, desc='Ramps', leave=False):
out = gen_ramps(wa)
res_ramps.append(out[0])
res_zeros.append(out[1])
setup_logging(log_prev, verbose=False)
return np.asarray(res_ramps), np.asarray(res_zeros)
def simulate_level1b(self, target_name, ra, dec, date_obs, time_obs,
sp=None, im_slope=None, cframe='sci', nint=None, do_dark=False,
save_dir=None, return_model=False, return_hdul=False, **kwargs):
""" Simulate DMS Level 1b data model """
from .simul.dms import level1b_data_model, save_level1b_fits
from stdatamodels import fits_support
# Update total number of integrations
if nint is not None:
nint_orig = self.Detector.multiaccum.nint
self.update_detectors(nint=nint)
kwargs['out_ADU'] = True
sci_data, zero_data = self.simulate_ramps(sp=sp, im_slope=im_slope, cframe=cframe, nint=nint,
do_dark=do_dark, **kwargs)
obs_params = self._gen_obs_params(target_name, ra, dec, date_obs, time_obs, **kwargs)
obs_params['save_dir'] = save_dir
outModel = level1b_data_model(obs_params, sci_data=sci_data, zero_data=zero_data)
if save_dir:
save_level1b_fits(outModel, obs_params, save_dir=save_dir)
# Return number of integrations
if nint is not None:
self.update_detectors(nint=nint_orig)
if return_hdul:
out_hdul, out_asdf = fits_support.to_fits(outModel._instance, outModel._schema)
if return_model and return_hdul:
return outModel, out_hdul
elif return_model:
return outModel
elif return_hdul:
return out_hdul
def table_filter(t, topn=None, **kwargs):
"""Filter and sort table.
Filter a resulting ramp table to exclude those with worse SNR for the same
or larger tacq. This is performed on a pattern-specific basis and returns
the Top N rows for each readout patten. The rows are ranked by an efficiency
metric, which is simply SNR / sqrt(tacq). If topn is set to None, then all
values that make the cut are returned (sorted by the efficiency metric).
Args
----
topn : int, None
Maximum number of rows to keep.
"""
if topn is None: topn = len(t)
temp = multiaccum()
pattern_settings = temp._pattern_settings
patterns = np.unique(t['Pattern'])
m = np.zeros(len(patterns))
s = np.zeros(len(patterns))
for i,patt in enumerate(patterns):
v1,v2,v3 = pattern_settings.get(patt)
m[i] = v1
s[i] = v2
# Sort by nf (m+s) then by m
isort = np.lexsort((m,m+s))
patterns = list(np.array(patterns)[isort])
tnew = t.copy()
tnew.remove_rows(np.arange(len(t)))
for pattern in patterns:
rows = t[t['Pattern']==pattern]
# For equivalent acquisition times, remove worse SNR
t_uniq = np.unique(rows['t_acq'])
ind_good = []
for tacq in t_uniq:
ind = np.where(rows['t_acq']==tacq)[0]
ind_snr_best = rows['SNR'][ind]==rows['SNR'][ind].max()
ind_good.append(ind[ind_snr_best][0])
rows = rows[ind_good]
# For each remaining row, exlude those that take longer with worse SNR than any other row
ind_bad = []
ind_bad_comp = []
for i,row in enumerate(rows):
for j,row_compare in enumerate(rows):
if i==j: continue
if (row['t_acq']>row_compare['t_acq']) and (row['SNR']<=(row_compare['SNR'])):
ind_bad.append(i)
ind_bad_comp.append(j)
break
rows.remove_rows(ind_bad)
isort = np.lexsort((rows['t_acq'],1/rows['eff']))
for row in rows[isort][0:topn]:
tnew.add_row(row)
return tnew
def merge_dicts(*dict_args):
"""
Given any number of dicts, shallow copy and merge into a new dict.
If the same key appars multiple times, priority goes to key/value
pairs in latter dicts.
"""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def gen_ramps(args):
"""
Helper function for generating FITs integrations from a slope image
"""
from .simul.ngNRC import simulate_detector_ramp
args_orig, kwargs = args
try:
res = simulate_detector_ramp(*args_orig, **kwargs)
except Exception as e:
print('Caught exception in worker thread:')
# This prints the type, value, and stack trace of the
# current exception being handled.
traceback.print_exc()
print()
raise e
return res
def nproc_use_ng(det, nint=None):
"""Optimize processor usage.
Attempt to estimate a reasonable number of processes to use for multiple
simultaneous slope_to_ramp() calculations. We attempt to estimate how many
calculations can happen in parallel without swapping to disk.
NOTE: Requires psutil package. Otherwise defaults to mp.cpu_count() / 2
Parameters
-----------
det : :class:`DetectorOps`
Input detector class
"""
import multiprocessing as mp
try:
import psutil
except ImportError:
nproc = int(mp.cpu_count() // 2)
if nproc < 1: nproc = 1
_log.info("No psutil package available, cannot estimate optimal nprocesses.")
_log.info("Returning nproc=ncpu/2={}.".format(nproc))
return nproc
ma = det.multiaccum
nd1 = ma.nd1
nd2 = ma.nd2
nf = ma.nf
ngroup = ma.ngroup
nint = ma.nint if nint is None else nint
naxis3 = nd1 + ngroup*nf + (ngroup-1)*nd2
# Compute the number of time steps per integration, per output
nstep_frame = (det.chsize+12) * (det.ypix+1)
nstep = nstep_frame * naxis3
# Pad nsteps to a power of 2, which is much faster
nstep2 = int(2**np.ceil(np.log2(nstep)))
# Memory formulas are based on fits to memory usage
# In GBytes
cf = np.array([1.48561822e-15, 7.02203657e-08, 2.52022191e-01])
mem_total = np.polynomial.polynomial.polyval(nstep2, cf[::-1])
# Available memory
mem = psutil.virtual_memory()
avail_GB = mem.available / (1024**3) - 1.0 # Leave 1 GB
# How many processors to split into?
nproc = avail_GB // mem_total
nproc = np.min([nproc, mp.cpu_count(), poppy.conf.n_processes])
if nint is not None:
nproc = np.min([nproc, nint])
# Resource optimization:
# Split iterations evenly over processors to free up minimally used processors.
# For example, if there are 5 processors only doing 1 iteration and a single
# processor doing 2 iterations, those 5 processors (and their memory) will not
# get freed until the final processor is finished. So, to minimize the number
# of idle resources, take the total iterations and divide by two (round up),
# and that should be the final number of processors to use.
np_max = np.ceil(nint / nproc)
nproc = int(np.ceil(nint / np_max))
if nproc < 1: nproc = 1
return int(nproc)
def saturation_limits(inst, psf_coeff=None, psf_coeff_hdr=None, sp=None, bp_lim=None,
int_time=21.47354, full_well=None, well_frac=0.8, units='vegamag',
verbose=False, **kwargs):
"""Saturation limits
Estimate the saturation limit of a point source for some bandpass.
By default, it outputs the max K-Band magnitude assuming a G2V star,
following the convention on the UA NIRCam webpage. This can be useful if
one doesn't know how bright a source is in the selected NIRCam filter
bandpass. Returns the saturation limit in Vega magnitudes by default,
however, any flux unit supported by Pysynphot is possible via the 'units'
keyword.
Parameters
==========
inst : NIRCam class
pynrc or webbpsf_ext or webbpsf
psf_coeff : ndarray
A cube of polynomial coefficients for generating PSFs. This is generally
oversampled with a shape (fov_pix*oversamp, fov_pix*oversamp, deg).
If not set, defaults to `inst.psf_coeff`.
psf_coeff_hdr : FITS header
Header information saved while generating coefficients.
sp : Pysynphot spectrum
Spectrum to calculate saturation (default: G2V star).
bp_lim : Pysynphot bandpass
The bandpass at which we report the magnitude that will saturate the NIRCam
band assuming some spectrum sp (default: 2MASS K-Band).
int_time : float
Integration time in seconds (default corresponds to 2 full frames).
full_well : float
Detector full well level in electrons. If not set, defaults to `inst.well_level`.
well_frac : float
Fraction of full well to consider "saturated." 0.8 by default.
units : str
Output units for saturation limit.
"""
from webbpsf_ext.psfs import gen_image_from_coeff
# Instrument bandpass
bp = inst.bandpass
# bandpass at which we report the magnitude that will saturate the NIRCam band assuming some spectrum sp
if bp_lim is None:
bp_lim = bp_2mass('k')
bp_lim.name = 'K-Band'
# Spectrum and bandpass to report magnitude that saturates NIRCam band
if sp is None:
sp = stellar_spectrum('G2V')
# Just for good measure, make sure we're all in the same wave units
bp_lim.convert(bp.waveunits)
sp.convert(bp.waveunits)
# Renormalize to 10th magnitude star (Vega mags)
mag_norm = 10.0
sp_norm = sp.renorm(mag_norm, 'vegamag', bp_lim)
sp_norm.name = sp.name
# Set up an observation of the spectrum using the specified bandpass
# Use the bandpass wavelengths to bin the fluxes
obs = S.Observation(sp_norm, bp, binset=bp.wave)
# Convert observation to counts (e/sec)
obs.convert('counts')
# Zodiacal Light contributions to background
sp_zodi = zodi_spec(**kwargs)
pix_scale = inst.pixelscale
obs_zodi = S.Observation(sp_zodi, bp, binset=bp.wave)
fzodi_pix = obs_zodi.countrate() * (pix_scale/206265.0)**2 # e-/sec/pixel
# Collecting area gets reduced for coronagraphic (Lyot pupil) observations
# This isn't accounted for later, because zodiacal light doesn't use PSF information
if inst.is_lyot:
fzodi_pix *= 0.19
# Total stellar flux and associated magnitude
star_flux = obs.countrate() # e/sec
mag_nrc = obs.effstim('vegamag')
_log.debug('Total Source Count Rate for {0} = {1:0.1f} mags: {2:.0f} e-/sec'.\
format(bp_lim.name, mag_norm, star_flux))
_log.debug('Magnitude in {0} band: {1:.2f}'.format(bp.name, mag_nrc))
# The number of pixels to span spatially
if psf_coeff is None:
psf_coeff = inst.psf_coeff
if psf_coeff_hdr is None:
psf_coeff_hdr = inst.psf_coeff_header
fov_pix = psf_coeff_hdr['FOVPIX']
# Generate the PSF image for analysis
# Use gen_image_from_coeff() rather than inst.calc_psf_from_coeff() in case we
# are supplying custom psf_coeff
t0 = time.time()
result = gen_image_from_coeff(inst, psf_coeff, psf_coeff_hdr,
sp_norm=sp_norm, return_oversample=False)
t1 = time.time()
_log.debug('Took %.2f seconds to generate images' % (t1-t0))
# Saturation level (some fraction of full well) in electrons
full_well = inst.well_level if full_well is None else full_well
sat_level = well_frac * full_well
# If grism spectroscopy
pupil_mask = inst.pupil_mask
if inst.is_grism:
wspec, spec = result
# Spectra are in 'sci' coords
# If GRISMC (along columns) rotate image by 90 deg CW
if (pupil_mask=='GRISMC') or (pupil_mask=='GRISM90'):
spec = np.rot90(spec, k=1)
elif inst.module=='B':
# Flip left to right so dispersion is in same direction as mod A
spec = spec[:,::-1]
wspec = wspec[::-1]
# Time to saturation for 10-mag source
sat_time = sat_level / spec
_log.debug('Approximate Time to {1:.2f} of Saturation: {0:.1f} sec'.\
format(sat_time.min(),well_frac))
# Magnitude necessary to saturate a given pixel
ratio = int_time / sat_time
ratio[ratio < __epsilon] = __epsilon
sat_mag = mag_norm + 2.5*np.log10(ratio)
# Wavelengths to grab saturation values
igood2 = bp.throughput > (bp.throughput.max()/4)
wgood2 = bp.wave[igood2] / 1e4
wsat_arr = np.unique((wgood2*10 + 0.5).astype('int')) / 10
wdel = wsat_arr[1] - wsat_arr[0]
msat_arr = []
for w in wsat_arr:
l1 = w - wdel / 4
l2 = w + wdel / 4
ind = ((wspec > l1) & (wspec <= l2))
msat = sat_mag[fov_pix//2-1:fov_pix//2+2, ind].max()
sp_temp = sp.renorm(msat, 'vegamag', bp_lim)
obs_temp = S.Observation(sp_temp, bp_lim, binset=bp_lim.wave)
msat_arr.append(obs_temp.effstim(units))
msat_arr = np.array(msat_arr)
# Print verbose information
if verbose:
if bp_lim.name == bp.name:
print('{0} Saturation Limit assuming {1} source:'.\
format(bp_lim.name,sp.name))
else:
print('{0} Saturation Limit for {1} assuming {2} source:'.\
format(bp_lim.name,bp.name,sp.name))
names = ('Wave','Sat Limit ({})'.format(units))
tbl = Table([wsat_arr,msat_arr], names=names)
for k in tbl.keys():
tbl[k].format = '9.2f'
print(tbl)
# Return saturation list along with corresponding wavelengths to dictionary
return {'wave':wsat_arr.tolist(), 'satmag':msat_arr.tolist(),
'units':units, 'Spectrum':sp_norm.name, 'bp_lim':bp_lim.name}
# DHS spectroscopy
elif (pupil_mask is not None) and ('DHS' in pupil_mask):
raise NotImplementedError('DHS not implemented')
# Imaging
else:
psf = result
# Time to saturation for 10-mag source
# Only need the maximum pixel value
sat_time = sat_level / psf.max()
_log.debug(f'Point source approximate Time to {well_frac:.2f} of Saturation: {sat_time:.2f} sec')
# Magnitude necessary to saturate a given pixel
ratio = int_time/sat_time
sat_mag = mag_norm + 2.5*np.log10(ratio)
# Convert to desired unit
sp_temp = sp.renorm(sat_mag, 'vegamag', bp_lim)
obs_temp = S.Observation(sp_temp, bp_lim, binset=bp_lim.wave)
res1 = obs_temp.effstim(units)
out1 = {'satlim':res1, 'units':units, 'bp_lim':bp_lim.name, 'Spectrum':sp_norm.name}
# For surface brightness saturation (extended object)
# Assume the fiducial (sp_norm) to be in terms of mag/arcsec^2
# Multiply countrate() by pix_scale^2 to get in terms of per pixel (area)
# This is the count rate per pixel for the fiducial starting point
image_ext = obs.countrate() * pix_scale**2 # e-/sec/pixel
sat_time = sat_level / image_ext
_log.debug(f'Extended object approximate Time to {well_frac:.2f} of Saturation: {sat_time:.2f} sec')
# Magnitude necessary to saturate a given pixel
ratio = int_time / sat_time
sat_mag_ext = mag_norm + 2.5*np.log10(ratio)
# Convert to desired units
sp_temp = sp.renorm(sat_mag_ext, 'vegamag', bp_lim)
obs_temp = S.Observation(sp_temp, bp_lim, binset=bp_lim.wave)
res2 = obs_temp.effstim(units)
out2 = out1.copy()
out2['satlim'] = res2
out2['units'] = units + '/arcsec^2'
# Print verbose information
if verbose:
if bp_lim.name == bp.name:
print('{} Saturation Limit assuming {} source (point source): {:.2f} {}'.\
format(bp_lim.name, sp_norm.name, out1['satlim'], out1['units']) )
print('{} Saturation Limit assuming {} source (extended): {:.2f} {}'.\
format(bp_lim.name, sp_norm.name, out2['satlim'], out2['units']) )
else:
print('{} Saturation Limit for {} assuming {} source (point source): {:.2f} {}'.\
format(bp_lim.name, bp.name, sp_norm.name, out1['satlim'], out1['units']) )
print('{} Saturation Limit for {} assuming {} source (extended): {:.2f} {}'.\
format(bp_lim.name, bp.name, sp_norm.name, out2['satlim'], out2['units']) )
return out1, out2
def _mlim_helper(sub_im, mag_norm=10, mag_arr=np.arange(5,35,1),
nsig=5, nint=1, snr_fact=1, forwardSNR=False, **kwargs):
"""Helper function for determining grism sensitivities"""
sub_im_sum = sub_im.sum()
# Just return the SNR for the input sub image
if forwardSNR:
im_var = pix_noise(fsrc=sub_im, **kwargs)**2
ns_sum = np.sqrt(np.sum(im_var) / nint)
return snr_fact * sub_im_sum / ns_sum
fact_arr = 10**((mag_arr-mag_norm)/2.5)
snr_arr = []
for f in fact_arr:
im = sub_im / f
im_var = pix_noise(fsrc=im, **kwargs)**2
im_sum = sub_im_sum / f
ns_sum = np.sqrt(np.sum(im_var) / nint)
snr_arr.append(im_sum / ns_sum)
snr_arr = snr_fact*np.asarray(snr_arr)
return np.interp(nsig, snr_arr[::-1], mag_arr[::-1])
def sensitivities(inst, psf_coeff=None, psf_coeff_hdr=None, sp=None, units=None,
forwardSNR=False, nsig=10, tf=10.737, ngroup=2, nf=1, nd2=0, nint=1,
return_image=False, image=None, cr_noise=True,
dw_bin=None, ap_spec=None, rad_EE=None, verbose=False, **kwargs):
"""Sensitivity Estimates
Estimates the sensitivity for a set of instrument parameters.
By default, a flat spectrum is convolved with the specified bandpass.
For imaging, this function also returns the surface brightness sensitivity.
The number of photo-electrons are computed for a source at some magnitude
as well as the noise from the detector readout and some average zodiacal
background flux. Detector readout noise follows an analytical form that
matches extensive long dark observations during cryo-vac testing.
This function returns the n-sigma background limit in units of uJy (unless
otherwise specified; valid units can be found on the Pysynphot webpage at
https://pysynphot.readthedocs.io/).
For imaging, a single value is given assuming aperture photometry with a
radius of ~1 FWHM rounded to the next highest integer pixel (or 2.5 pixels,
whichever is larger). For spectral observations, this function returns an
array of sensitivities at 0.1um intervals with apertures corresponding to
2 spectral pixels and a number of spatial pixels equivalent to 1 FWHM rounded
to the next highest integer (minimum of 5 spatial pixels).
Parameters
==========
Instrument Settings
-------------------
filter_or_bp : Either the name of the filter or pre-computed Pysynphot bandpass.
pupil : NIRCam pupil elements such as grisms or lyot stops
mask : Specify the coronagraphic occulter (spots or bar)
module : 'A' or 'B'
pix_scale : Pixel scale in arcsec/pixel
Spectrum Settings
-------------------
sp : A pysynphot spectral object to calculate sensitivity
(default: Flat spectrum in photlam)
nsig : Desired nsigma sensitivity
units : Output units (defaults to uJy for grisms, nJy for imaging)
forwardSNR : Find the SNR of the input spectrum instead of determining sensitivity.
Ramp Settings
-------------------
tf : Time per frame
ngroup : Number of groups per integration
nf : Number of averaged frames per group
nd2 : Number of dropped frames per group
nint : Number of integrations/ramps to consider
PSF Information
-------------------
coeff : A cube of polynomial coefficients for generating PSFs. This is
generally oversampled with a shape (fov_pix*oversamp, fov_pix*oversamp, deg).
If not set, this will be calculated using :func:`gen_psf_coeff`.
coeff_hdr : Header associated with coeff cube.
fov_pix : Number of detector pixels in the image coefficient and PSF.
oversample : Factor of oversampling of detector pixels.
offset_r : Radial offset of the target from center.
offset_theta : Position angle for that offset, in degrees CCW (+Y).
Misc.
-------------------
image : Explicitly pass image data rather than calculating from coeff.
return_image : Instead of calculating sensitivity, return the image calced from coeff.
Useful if needing to calculate sensitivities for many different settings.
rad_EE : Extraction aperture radius (in pixels) for imaging mode.
dw_bin : Delta wavelength to calculate spectral sensitivities (grisms & DHS).
ap_spec : Instead of dw_bin, specify the spectral extraction aperture in pixels.
Takes priority over dw_bin. Value will get rounded up to nearest int.
cr_noise : Include noise from cosmic ray hits?
Keyword Args
-------------------
zodi_spec - zfact, ra, dec, thisday, [locstr, year, day]
pix_noise - rn, ktc, idark, and p_excess
gen_psf_coeff - npsf and ndeg
read_filter - ND_acq
"""
# PSF coefficients
from webbpsf_ext.psfs import gen_image_from_coeff
from webbpsf_ext.bandpasses import bp_igood
pupil_mask = inst.pupil_mask
grism_obs = inst.is_grism
dhs_obs = (pupil_mask is not None) and ('DHS' in pupil_mask)
lyot_obs = inst.is_lyot
coron_obs = inst.is_coron
# Get filter throughput and create bandpass
bp = inst.bandpass
filter = inst.filter
waveset = np.copy(bp.wave)
# Pixel scale (arcsec/pixel)
pix_scale = inst.pixelscale
# Spectrum and bandpass to report magnitude that saturates NIRCam band
if sp is None:
sp = S.ArraySpectrum(waveset, 0*waveset + 10.)
sp.name = 'Flat spectrum in photlam'
if forwardSNR:
sp_norm = sp
else:
# Renormalize to 10th magnitude star
mag_norm = 10
sp_norm = sp.renorm(mag_norm, 'vegamag', bp)
sp_norm.name = sp.name
# Zodiacal Light Stuff
sp_zodi = zodi_spec(**kwargs)
obs_zodi = S.Observation(sp_zodi, bp, binset=waveset)
fzodi_pix = obs_zodi.countrate() * (pix_scale/206265.0)**2 # e-/sec/pixel
# Collecting area gets reduced for coronagraphic observations
# This isn't accounted for later, because zodiacal light doesn't use PSF information
if coron_obs:
fzodi_pix *= 0.19
# The number of pixels to span spatially for WebbPSF calculations
fov_pix = psf_coeff_hdr['FOVPIX']
oversample = psf_coeff_hdr['OSAMP']
# Generate the PSF image for analysis.
# This process can take a while if being done over and over again.
# Let's provide the option to skip this with a pre-generated image.
# Skip image generation if `image` keyword is not None.
# Remember, this is for a very specific NORMALIZED spectrum
t0 = time.time()
if image is None:
image = gen_image_from_coeff(inst, psf_coeff, psf_coeff_hdr,
sp_norm=sp_norm, return_oversample=False)
t1 = time.time()
_log.debug(f'fov_pix={fov_pix}, oversample={oversample}')
_log.debug('Took {:.2f} seconds to generate images'.format(t1-t0))
if return_image:
return image
# Cosmic Ray Loss (JWST-STScI-001721)
# SNR with cosmic ray events depends directly on ramp integration time
if cr_noise:
tint = (ngroup*nf + (ngroup-1)*nd2) * tf
snr_fact = 1.0 - tint*6.7781e-5
else:
snr_fact = 1.0
# If grism spectroscopy
if grism_obs:
if units is None:
units = 'uJy'
wspec, spec = image
# Spectra are in 'sci' coords
# If GRISMC (along columns) rotate image by 90 deg CW
if (pupil_mask=='GRISMC') or (pupil_mask=='GRISM90'):
spec = np.rot90(spec, k=1)
elif inst.module=='B':
# Flip left to right so dispersion is in same direction as mod A
spec = spec[:,::-1]
wspec = wspec[::-1]
# Wavelengths to grab sensitivity values
#igood2 = bp.throughput > (bp.throughput.max()/4)
igood2 = bp_igood(bp, min_trans=bp.throughput.max()/3, fext=0)
wgood2 = waveset[igood2] / 1e4
wsen_arr = np.unique((wgood2*10 + 0.5).astype('int')) / 10
# Add an addition 0.1 on either side
dw = 0.1
wsen_arr = np.concatenate(([wsen_arr.min()-dw],wsen_arr,[wsen_arr.max()+dw]))
#wdel = wsen_arr[1] - wsen_arr[0]
# FWHM at each pixel position
#fwhm_pix_arr = np.ceil(wsen_arr * 0.206265 / 6.5 / pix_scale)
# Make sure there's at least 5 total pixels in spatial dimension
#temp = fwhm_pix_arr.repeat(2).reshape([fwhm_pix_arr.size,2])
#temp[:,0] = 2
#rad_arr = temp.max(axis=1)
# Ignore the above, let's always do a 5pix spatial aperture
rad_arr = np.zeros(wsen_arr.size) + 2 # (2*2+1)
# Spatial aperture size at each wavelength
ap_spat = (2*rad_arr+1).astype('int')
# Indices with spectral image
ispat1 = (fov_pix - ap_spat) // 2
ispat2 = ispat1 + ap_spat
# Get spectral indices on the spectral image
if (dw_bin is None) and (ap_spec is None):
ap_spec = 2
elif (dw_bin is not None) and (ap_spec is None):
ap_spec = wspec.size * dw_bin / (wspec.max() - wspec.min())
ap_spec = int(ap_spec+0.5)
else:
ap_spec = int(ap_spec+0.5)
diff = abs(wspec.reshape(wspec.size,1) - wsen_arr)
ind_wave = []
for i in np.arange(wsen_arr.size):
ind = (np.where(diff[:,i]==min(diff[:,i])))[0]
ind_wave.append(ind[0])
ispec1 = np.asarray(ind_wave) - ap_spec // 2
ispec2 = ispec1 + ap_spec
# At each wavelength, grab a sub image and find the limiting magnitude
bglim_arr = []
for i in np.arange(wsen_arr.size):
sub_im = spec[ispat1[i]:ispat2[i],ispec1[i]:ispec2[i]]
if forwardSNR:
snr = _mlim_helper(sub_im, nint=nint, forwardSNR=forwardSNR,
ngroup=ngroup, nf=nf, nd2=nd2, tf=tf, fzodi=fzodi_pix,
snr_fact=snr_fact, **kwargs)
bglim_arr.append(snr)
else:
# Interpolate over a coarse magnitude grid
mag_arr=np.arange(5,35,1)
mag_lim = _mlim_helper(sub_im, mag_norm, mag_arr, nsig=nsig, nint=nint,
ngroup=ngroup, nf=nf, nd2=nd2, tf=tf, fzodi=fzodi_pix,
snr_fact=snr_fact, **kwargs)
# Zoom in and interoplate over finer grid
mag_arr = np.arange(mag_lim-1,mag_lim+1,0.05)
mag_lim = _mlim_helper(sub_im, mag_norm, mag_arr, nsig=nsig, nint=nint,
ngroup=ngroup, nf=nf, nd2=nd2, tf=tf, fzodi=fzodi_pix,
snr_fact=snr_fact, **kwargs)
# Renormalize spectrum to magnitude limit and convert to desired units
sp_norm2 = sp.renorm(mag_lim, 'vegamag', bp)
sp_norm2.convert(units)
bglim = np.interp(wsen_arr[i],sp_norm2.wave/1e4, sp_norm2.flux)
bglim_arr.append(bglim)
bglim_arr = np.asarray(bglim_arr)
# Return sensitivity list along with corresponding wavelengths to dictionary
if forwardSNR:
sp_norm.convert(units)
fvals = np.interp(wsen_arr, sp_norm.wave/1e4, sp_norm.flux)
out = {'wave':wsen_arr.tolist(), 'snr':bglim_arr.tolist(),
'flux_units':units, 'flux':fvals.tolist(), 'Spectrum':sp.name}
if verbose:
print('{0} SNR for {1} source'.format(bp.name,sp.name))
names = ('Wave','SNR','Flux ({})'.format(units))
tbl = Table([wsen_arr,bglim_arr, fvals], names=names)
for k in tbl.keys():
tbl[k].format = '9.2f'
print(tbl)
else:
out = {'wave':wsen_arr.tolist(), 'sensitivity':bglim_arr.tolist(),
'units':units, 'nsig':nsig, 'Spectrum':sp.name}
if verbose:
print('{} Background Sensitivity ({}-sigma) for {} source'.\
format(bp.name,nsig,sp.name))
names = ('Wave','Limit ({})'.format(units))
tbl = Table([wsen_arr,bglim_arr], names=names)
for k in tbl.keys():
tbl[k].format = '9.2f'
print(tbl)
return out
# DHS spectroscopy
elif dhs_obs:
raise NotImplementedError('DHS has yet to be fully included')
# Imaging (includes coronagraphy)
else:
if units is None:
units = 'nJy'
# Wavelength to grab sensitivity values
obs = S.Observation(sp_norm, bp, binset=waveset)
efflam = obs.efflam()*1e-4 # microns
# Encircled energy
rho_pix = dist_image(image)
bins = np.arange(rho_pix.min(), rho_pix.max() + 1, 1)
# Groups indices for each radial bin
igroups, _, rad_pix = hist_indices(rho_pix, bins, True)
# Sum of each radial annulus
sums = binned_statistic(igroups, image, func=np.sum)
# Encircled energy within each radius
EE_flux = np.cumsum(sums)
# How many pixels do we want?
fwhm_pix = 1.2 * efflam * 0.206265 / 6.5 / pix_scale
if rad_EE is None:
rad_EE = np.max([fwhm_pix,2.5])
npix_EE = np.pi * rad_EE**2
# For surface brightness sensitivity (extended object)
# Assume the fiducial (sp_norm) to be in terms of mag/arcsec^2
# Multiply countrate() by pix_scale^2 to get in terms of per pixel (area)
# This is the count rate per pixel for the fiducial starting point
image_ext = obs.countrate() * pix_scale**2 # e-/sec/pixel
#print(image_ext)
if forwardSNR:
im_var = pix_noise(ngroup=ngroup, nf=nf, nd2=nd2, tf=tf,
fzodi=fzodi_pix, fsrc=image, **kwargs)**2
# root squared sum of noise within each radius
sums = binned_statistic(igroups, im_var, func=np.sum)
EE_var = np.cumsum(sums)
EE_sig = np.sqrt(EE_var / nint)
EE_snr = snr_fact * EE_flux / EE_sig
snr_rad = np.interp(rad_EE, rad_pix, EE_snr)
flux_val = obs.effstim(units)
out1 = {'type':'Point Source', 'snr':snr_rad, 'Spectrum':sp.name,
'flux':flux_val, 'flux_units':units}
# Extended object surfrace brightness
im_var = pix_noise(ngroup=ngroup, nf=nf, nd2=nd2, tf=tf,
fzodi=fzodi_pix, fsrc=image_ext, **kwargs)**2
im_sig = np.sqrt(im_var*npix_EE / nint)
# Total number of pixels within r=fwhm or 2.5 pixels
fsum2 = image_ext * npix_EE
snr2 = snr_fact * fsum2 / im_sig # SNR per "resolution element"ish
out2 = {'type':'Surface Brightness', 'snr':snr2, 'Spectrum':sp.name,
'flux':flux_val, 'flux_units':units+'/arcsec^2'}
if verbose:
for out in [out1,out2]:
print('{} SNR ({:.2f} {}): {:.2f} sigma'.\
format(out['type'], out['flux'], out['flux_units'], out['snr']))
else:
# Interpolate over a coarse magnitude grid to get SNR
# Then again over a finer grid
for ii in np.arange(2):
if ii==0: mag_arr = np.arange(5,35,1)
else: mag_arr = np.arange(mag_lim-1,mag_lim+1,0.05)
fact_arr = 10**((mag_arr-mag_norm)/2.5)
snr_arr = []
for f in fact_arr:
#im_var = image/f/tint + var_const
im_var = pix_noise(ngroup=ngroup, nf=nf, nd2=nd2, tf=tf,
fzodi=fzodi_pix, fsrc=image/f, **kwargs)**2
# root squared sum of noise within each radius
sums = binned_statistic(igroups, im_var, func=np.sum)
EE_var = np.cumsum(sums)
EE_sig = np.sqrt(EE_var / nint)
EE_snr = snr_fact * (EE_flux/f) / EE_sig
snr_rad = np.interp(rad_EE, rad_pix, EE_snr)
snr_arr.append(snr_rad)
snr_arr = np.asarray(snr_arr)
mag_lim = np.interp(nsig, snr_arr[::-1], mag_arr[::-1])
_log.debug('Mag Limits [{0:.2f},{1:.2f}]; {2:.0f}-sig: {3:.2f}'.\
format(mag_arr.min(),mag_arr.max(),nsig,mag_lim))
# Renormalize spectrum at given magnitude limit
sp_norm2 = sp.renorm(mag_lim, 'vegamag', bp)
# Determine effective stimulus
obs2 = S.Observation(sp_norm2, bp, binset=waveset)
bglim = obs2.effstim(units)
out1 = {'sensitivity':bglim, 'units':units, 'nsig':nsig, 'Spectrum':sp.name}
# Same thing as above, but for surface brightness
for ii in np.arange(2):
if ii==0: mag_arr = np.arange(5,35,1)
else: mag_arr = np.arange(mag_lim-1,mag_lim+1,0.05)
fact_arr = 10**((mag_arr-mag_norm)/2.5)
snr_arr = []
for f in fact_arr:
im_var = pix_noise(ngroup=ngroup, nf=nf, nd2=nd2, tf=tf,
fzodi=fzodi_pix, fsrc=image_ext/f, **kwargs)**2
im_sig = np.sqrt(im_var*npix_EE / nint)
fsum2 = image_ext * npix_EE / f
snr2 = snr_fact * fsum2 / im_sig
#print('{:.5f} {:.5f} {:.2f}'.format(fsum2,im_sig,snr2))
snr_arr.append(snr2)
snr_arr = np.asarray(snr_arr)
mag_lim = np.interp(nsig, snr_arr[::-1], mag_arr[::-1])
_log.debug('Mag Limits (mag/asec^2) [{0:.2f},{1:.2f}]; {2:.0f}-sig: {3:.2f}'.\
format(mag_arr.min(),mag_arr.max(),nsig,mag_lim))
# mag_lim is in terms of mag/arcsec^2 (same as mag_norm)
sp_norm2 = sp.renorm(mag_lim, 'vegamag', bp)
obs2 = S.Observation(sp_norm2, bp, binset=waveset)
bglim2 = obs2.effstim(units) # units/arcsec**2
out2 = out1.copy()
out2['sensitivity'] = bglim2
out2['units'] = units+'/arcsec^2'
if verbose:
print('{} Sensitivity ({}-sigma): {:.2f} {}'.\
format('Point Source', nsig, bglim, out1['units']))
print('{} Sensitivity ({}-sigma): {:.2f} {}'.\
format('Surface Brightness', nsig, bglim2, out2['units']))
return out1, out2
|
{
"content_hash": "197741be33f17f2cdef0df2a69806c97",
"timestamp": "",
"source": "github",
"line_count": 3296,
"max_line_length": 134,
"avg_line_length": 40.37378640776699,
"alnum_prop": 0.5692331970662499,
"repo_name": "JarronL/pynrc",
"id": "600a5a64421f29589ecd5fd65cc5461e4275c349",
"size": "133072",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pynrc/pynrc_core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "121035282"
},
{
"name": "Makefile",
"bytes": "2372"
},
{
"name": "Python",
"bytes": "1502350"
}
],
"symlink_target": ""
}
|
import re
def search_equality(text):
""" Return the letter if that matches the pattern, else return None
"""
# pattern: [1 lower](3 uppers)[1 lower](3 uppers)[1 lower], e.g.: aBBDiLDMx
pattern = "[a-z][A-Z][A-Z][A-Z][a-z][A-Z][A-Z][A-Z][a-z]"
equality = re.search(pattern, text)
if equality:
# just return the lowercase on the position 5 (index[4])
return equality.group()[4]
else:
return None
if __name__ == '__main__':
text_path = "./data/3_equality.txt"
equalities = []
text = open(text_path, 'r')
for line in text.readlines():
# search the letters those matching the patterns
equality = search_equality(line.strip())
# just append the valid letters, None exclusive
equalities.append(equality) if equality is not None else equalities
text.close()
print ''.join(letter for letter in equalities)
|
{
"content_hash": "4e1f6d7df769557480b09bdb037d3238",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 79,
"avg_line_length": 31.413793103448278,
"alnum_prop": 0.6180021953896817,
"repo_name": "KellyChan/python-examples",
"id": "04fc5d0c00301ac25c640238e7bf17ed3a44aeb5",
"size": "911",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/python_challenge/3_equality.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "86277"
},
{
"name": "HTML",
"bytes": "320182"
},
{
"name": "JavaScript",
"bytes": "154998"
},
{
"name": "Jupyter Notebook",
"bytes": "30660"
},
{
"name": "Python",
"bytes": "238130"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.rst'), encoding = 'utf-8') as f:
long_description = f.read()
setup(
name = 'money',
version = '0.0.1',
description = 'gestisce spese e entrate',
long_description = long_description,
url = 'https://github.com/scompo/money',
author = 'Mauro Scomparin',
author_email = 'scompo@gmail.com',
license = 'BSD',
packages=find_packages(exclude=['contrib', 'docs', 'tests', 'utils', 'scripts']),
entry_points={
'console_scripts' : [
'inserisci=money.money:inserimento_dati',
'riassunto=money.money:riassunto_dati',
],
},
)
|
{
"content_hash": "11891b336f9bf5db41ceda523619b578",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 85,
"avg_line_length": 29.53846153846154,
"alnum_prop": 0.6276041666666666,
"repo_name": "scompo/money",
"id": "edf3f89a9671c85052fd5d16b6fdf76307130f08",
"size": "768",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5864"
},
{
"name": "Shell",
"bytes": "194"
}
],
"symlink_target": ""
}
|
import unittest
from unittest import mock
from tethys_services.backends.hydroshare_playground import HydroSharePlaygroundOAuth2
class TestHydroSharePlayground(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
@mock.patch('tethys_services.backends.hydroshare_beta.HydroShareOAuth2')
def test_HydroSharePlaygroundOAuth2(self, mock_hydro_share_auth2):
hydro_share_beta_obj = HydroSharePlaygroundOAuth2(mock_hydro_share_auth2)
expected_auth_server_full_url = 'https://playground.hydroshare.org'
self.assertEqual(expected_auth_server_full_url, hydro_share_beta_obj.auth_server_full_url)
expected_authorization_url = "https://playground.hydroshare.org/o/authorize/"
self.assertEqual(expected_authorization_url, hydro_share_beta_obj.AUTHORIZATION_URL)
expected_access_toekn_url = "https://playground.hydroshare.org/o/token/"
self.assertEqual(expected_access_toekn_url, hydro_share_beta_obj.ACCESS_TOKEN_URL)
expected_user_info = "https://playground.hydroshare.org/hsapi/userInfo/"
self.assertEqual(expected_user_info, hydro_share_beta_obj.USER_DATA_URL)
|
{
"content_hash": "87162372a08e3b5ecf115c0af1d5c4ab",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 98,
"avg_line_length": 43.55555555555556,
"alnum_prop": 0.7431972789115646,
"repo_name": "CI-WATER/tethys",
"id": "036d6d54d99e30c0254b0d513bd3e363467802cd",
"size": "1176",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/unit_tests/test_tethys_services/test_backends/test_hydroshare_playground.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "175789"
},
{
"name": "HTML",
"bytes": "149728"
},
{
"name": "JavaScript",
"bytes": "360375"
},
{
"name": "Python",
"bytes": "592551"
}
],
"symlink_target": ""
}
|
'''Utilities'''
import pkg_resources
def example_audio_file():
"""Get the included example file"""
path = 'example_audio/amen.wav'
return pkg_resources.resource_filename(__name__, path)
def example_mono_audio_file():
"""Get the included example file"""
path = 'example_audio/amen-mono.wav'
return pkg_resources.resource_filename(__name__, path)
|
{
"content_hash": "35b028d332448ba392dcbbbd59c06cc0",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 58,
"avg_line_length": 24.933333333333334,
"alnum_prop": 0.6764705882352942,
"repo_name": "algorithmic-music-exploration/amen",
"id": "c84a1a52f687914b7761fa4c1db530e21cc28647",
"size": "420",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "amen/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "54522"
},
{
"name": "Shell",
"bytes": "931"
}
],
"symlink_target": ""
}
|
"""Not-MNIST handler."""
import os
from dm_nevis.datasets_storage.handlers import extraction_utils as utils
from dm_nevis.datasets_storage.handlers import splits
from dm_nevis.datasets_storage.handlers import types
from tensorflow.io import gfile
_IGNORED_FILES_REGEX = '|'.join([
utils.DEFAULT_IGNORED_FILES_REGEX,
r'notMNIST_small/notMNIST_small/A/RGVtb2NyYXRpY2FCb2xkT2xkc3R5bGUgQm9sZC50dGY=.png',
r'notMNIST_small/notMNIST_small/F/Q3Jvc3NvdmVyIEJvbGRPYmxpcXVlLnR0Zg==.png'
])
def _path_to_label_fn(path: str, label_to_id):
label = os.path.basename(os.path.dirname(path))
return label_to_id[label]
def not_mnist_handler(dataset_path: str) -> types.HandlerOutput:
"""Not-MNIST dataset handler."""
files = gfile.listdir(dataset_path)
labels = [
'A',
'B',
'C',
'D',
'E',
'F',
'G',
'H',
'I',
'J',
]
label_to_id = dict(
((label, label_id) for label_id, label in enumerate(labels)))
metadata = types.DatasetMetaData(
num_classes=len(labels),
num_channels=1,
image_shape=(), # Ignored for now.
additional_metadata=dict(
label_to_id=label_to_id,
task_type='classification',
image_type='ocr',
))
def make_gen_fn():
return utils.generate_images_from_zip_files(
dataset_path,
files,
path_to_label_fn=lambda path: _path_to_label_fn(path, label_to_id),
ignored_files_regex=_IGNORED_FILES_REGEX,
path_filter=lambda x: x.startswith('notMNIST_small/notMNIST_small'),
convert_mode='L')
# TODO: Make more efficient deduplication algorithm.
make_unique_gen_fn = utils.deduplicate_data_generator(make_gen_fn())
per_split_gen = splits.random_split_generator_into_splits_with_fractions(
make_unique_gen_fn, splits.SPLIT_WITH_FRACTIONS_FOR_ALL_DATA,
splits.MERGED_TRAIN_AND_DEV)
return metadata, per_split_gen
not_mnist_dataset = types.DownloadableDataset(
name='not_mnist',
download_urls=[types.KaggleDataset(
dataset_name='jwjohnson314/notmnist',
checksum='e2a47bb2a88c2c6bcae60d9f95223ace')],
website_url='https://www.kaggle.com/jwjohnson314/notmnist',
handler=not_mnist_handler)
|
{
"content_hash": "d34a67c8d27df029287bf5a466832ac9",
"timestamp": "",
"source": "github",
"line_count": 77,
"max_line_length": 88,
"avg_line_length": 29.09090909090909,
"alnum_prop": 0.6683035714285714,
"repo_name": "deepmind/dm_nevis",
"id": "cd2730ab3c63b71359b2da3614d630f06a60de58",
"size": "2835",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dm_nevis/datasets_storage/handlers/not_mnist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1043549"
},
{
"name": "Shell",
"bytes": "2750"
}
],
"symlink_target": ""
}
|
import arbor
from arbor import mechanism as mech
from arbor import location as loc
import pandas, seaborn
import sys
# Load a cell morphology from an nml file.
# Example present here: morph.nml
if len(sys.argv) < 2:
print("No NeuroML file passed to the program")
sys.exit(0)
filename = sys.argv[1]
# Read the NeuroML morphology from the file.
morpho_nml = arbor.neuroml(filename)
# Read the morphology data associated with morphology "m1".
morpho_data = morpho_nml.morphology("m1")
# Get the morphology.
morpho = morpho_data.morphology
# Get the region label dictionaries associated with the morphology.
morpho_segments = morpho_data.segments()
morpho_named = morpho_data.named_segments()
morpho_groups = morpho_data.groups()
# Create new label dict add to it all the NeuroML dictionaries.
labels = arbor.label_dict()
labels.append(morpho_segments)
labels.append(morpho_named)
labels.append(morpho_groups)
# Add locsets to the label dictionary.
labels['stim_site'] = '(location 1 0.5)' # site for the stimulus, in the middle of branch 1.
labels['axon_end'] = '(restrict (terminal) (region "axon"))' # end of the axon.
labels['root'] = '(root)' # the start of the soma in this morphology is at the root of the cell.
# Optional: print out the regions and locsets available in the label dictionary.
print("Label dictionary regions: ", labels.regions, "\n")
print("Label dictionary locsets: ", labels.locsets, "\n")
decor = arbor.decor()
# Set initial membrane potential to -55 mV
decor.set_property(Vm=-55)
# Use Nernst to calculate reversal potential for calcium.
decor.set_ion('ca', method=mech('nernst/x=ca'))
#decor.set_ion('ca', method='nernst/x=ca')
# hh mechanism on the soma and axon.
decor.paint('"soma"', arbor.density('hh'))
decor.paint('"axon"', arbor.density('hh'))
# pas mechanism the dendrites.
decor.paint('"dend"', arbor.density('pas'))
# Increase resistivity on dendrites.
decor.paint('"dend"', rL=500)
# Attach stimuli that inject 4 nA current for 1 ms, starting at 3 and 8 ms.
decor.place('"root"', arbor.iclamp(10, 1, current=5), "iclamp0")
decor.place('"stim_site"', arbor.iclamp(3, 1, current=0.5), "iclamp1")
decor.place('"stim_site"', arbor.iclamp(10, 1, current=0.5), "iclamp2")
decor.place('"stim_site"', arbor.iclamp(8, 1, current=4), "iclamp3")
# Detect spikes at the soma with a voltage threshold of -10 mV.
decor.place('"axon_end"', arbor.spike_detector(-10), "detector")
# Create the policy used to discretise the cell into CVs.
# Use a single CV for the soma, and CVs of maximum length 1 μm elsewhere.
soma_policy = arbor.cv_policy_single('"soma"')
dflt_policy = arbor.cv_policy_max_extent(1.0)
policy = dflt_policy | soma_policy
decor.discretization(policy)
# Combine morphology with region and locset definitions to make a cable cell.
cell = arbor.cable_cell(morpho, labels, decor)
print(cell.locations('"axon_end"'))
# Make single cell model.
m = arbor.single_cell_model(cell)
# Attach voltage probes that sample at 50 kHz.
m.probe('voltage', where='"root"', frequency=50)
m.probe('voltage', where='"stim_site"', frequency=50)
m.probe('voltage', where='"axon_end"', frequency=50)
# Simulate the cell for 15 ms.
tfinal=15
m.run(tfinal)
print("Simulation done.")
# Print spike times.
if len(m.spikes)>0:
print('{} spikes:'.format(len(m.spikes)))
for s in m.spikes:
print(' {:7.4f}'.format(s))
else:
print('no spikes')
# Plot the recorded voltages over time.
print("Plotting results ...")
df_list = []
for t in m.traces:
df_list.append(pandas.DataFrame({'t/ms': t.time, 'U/mV': t.value, 'Location': str(t.location), "Variable": t.variable}))
df = pandas.concat(df_list,ignore_index=True)
seaborn.relplot(data=df, kind="line", x="t/ms", y="U/mV",hue="Location",col="Variable",ci=None).savefig('single_cell_nml.svg')
|
{
"content_hash": "08aa32208182869f6dfbb5ccfadff420",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 126,
"avg_line_length": 35.58878504672897,
"alnum_prop": 0.7129726890756303,
"repo_name": "halfflat/arbor",
"id": "d4f573133f8f96e33c643c498c9cbd6f8b1a74f8",
"size": "3832",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/example/single_cell_nml.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "AMPL",
"bytes": "60486"
},
{
"name": "C",
"bytes": "81783"
},
{
"name": "C++",
"bytes": "5838781"
},
{
"name": "CMake",
"bytes": "79439"
},
{
"name": "Cuda",
"bytes": "60413"
},
{
"name": "Dockerfile",
"bytes": "3701"
},
{
"name": "Julia",
"bytes": "16396"
},
{
"name": "Objective-C",
"bytes": "42904"
},
{
"name": "Perl",
"bytes": "11581"
},
{
"name": "Python",
"bytes": "222000"
},
{
"name": "Shell",
"bytes": "11205"
}
],
"symlink_target": ""
}
|
"""
Demo for DBNanoServer
"""
import requests
import json
import numpy as np
import datetime
import matplotlib.pyplot as plt
def main():
# Generate a sine and cosine wave
Fs = 800
f = 60
sample = 50
x = np.arange(sample)
y_sin = np.sin(2 * np.pi * f * x / Fs)
y_cos = np.cos(2 * np.pi * f * x / Fs)
# Generature UNIX timestamps for each data point
unixtime = int((datetime.datetime.utcnow() - datetime.datetime.utcfromtimestamp(0)).total_seconds())
at = unixtime + x - sample
print at
# Plot the sine wave
plt.plot(y_sin)
plt.show()
#
# Send the data to the server
#
# Set url address.
base = 'http://127.0.0.1:5000/'
# Set query (i.e. http://url.com/?key=value).
query = {}
# Set header.
header = {'Content-Type':'application/json'}
# First, send the sine wave
endpoint = 'network/Demo/object/Waves/stream/Sine'
payload = []
for i in range(sample):
payload.append( {'value':y_sin[i],'at':at[i]} )
# Set body (also referred to as data or payload). Body is a JSON string.
body = json.dumps(payload)
# Form and send request. Set timeout to 2 minutes. Receive response.
r = requests.request('post', base + endpoint, data=body, params=query, headers=header, timeout=120 )
print r.url
# Text is JSON string. Convert to Python dictionary/list
print r.text
#print json.loads( r.text )
# Second, send the cosine wave
endpoint = 'network/Demo/object/Waves/stream/Cosine'
payload = []
for i in range(sample):
payload.append( {'value':y_cos[i],'at':at[i]} )
body = json.dumps(payload)
# Form and send request. Set timeout to 2 minutes. Receive response.
r = requests.request('post', base + endpoint, data=body, params=query, headers=header, timeout=120 )
print r.url
# Text is JSON string. Convert to Python dictionary/list
print r.text
#print json.loads( r.text )
# Third, read the data from the Cosine stream
endpoint = 'network/Demo/object/Waves/stream/Cosine'
address = base + endpoint
query = {'limit':100}
# Form and send request. Set timeout to 2 minutes. Receive response.
r = requests.request('get', address, params=query, headers=header, timeout=120 )
print r.url
# Text is JSON string. Convert to Python dictionary/list
print r.text
#print json.loads( r.text )
main()
|
{
"content_hash": "707c9101152ab2b7ace10aedd6cff05d",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 104,
"avg_line_length": 26.782608695652176,
"alnum_prop": 0.6298701298701299,
"repo_name": "ksk5429/ShowerThoughtsProject",
"id": "d45ba9af32e6fcd9a98e145684d25e920714e738",
"size": "2488",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "DBNanoServer/demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "243062"
},
{
"name": "HTML",
"bytes": "348951"
},
{
"name": "JavaScript",
"bytes": "1521983"
},
{
"name": "Python",
"bytes": "72138"
}
],
"symlink_target": ""
}
|
from __future__ import annotations # isort:skip
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
from subprocess import run
from sys import executable as python
# Bokeh imports
from tests.support.util.project import ls_modules, verify_clean_imports
#-----------------------------------------------------------------------------
# Tests
#-----------------------------------------------------------------------------
# There should not be unprotected pandas imports /anywhere/
PANDAS_ALLOWED = (
"bokeh.sampledata",
"bokeh.sphinxext",
"tests.support",
)
MODULES = ls_modules(skip_prefixes=PANDAS_ALLOWED)
# This test takes a long time to run, but if the combined test fails then
# uncommenting it will locate exactly what module(s) are the problem
# @pytest.mark.parametrize('module', MODULES)
# def test_no_pandas_common_individual(module) -> None:
# proc = run([python, "-c", verify_clean_imports('pandas', [module])])
# assert proc.returncode == 0, f"pandas imported in common module {module}"
def test_no_pandas_common_combined() -> None:
''' In order to keep the initial import times reasonable, import
of Bokeh should not result in any Pandas code being imported. This
test ensures that importing basic modules does not bring in pandas.
'''
proc = run([python, "-c", verify_clean_imports('pandas', MODULES)])
assert proc.returncode == 0, "pandas imported in common modules"
|
{
"content_hash": "4a6476c706772b091e2dbb2881a069aa",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 79,
"avg_line_length": 37.25581395348837,
"alnum_prop": 0.5811485642946317,
"repo_name": "bokeh/bokeh",
"id": "4545619d046f03df82b033115f8c5075909d6529",
"size": "2122",
"binary": false,
"copies": "1",
"ref": "refs/heads/branch-3.1",
"path": "tests/codebase/test_no_pandas_common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1884"
},
{
"name": "Dockerfile",
"bytes": "1924"
},
{
"name": "GLSL",
"bytes": "44696"
},
{
"name": "HTML",
"bytes": "53475"
},
{
"name": "JavaScript",
"bytes": "20301"
},
{
"name": "Less",
"bytes": "46376"
},
{
"name": "Python",
"bytes": "4475226"
},
{
"name": "Shell",
"bytes": "7673"
},
{
"name": "TypeScript",
"bytes": "3652153"
}
],
"symlink_target": ""
}
|
import argparse
import json
import os
import signal
import sys
from time import sleep
import uuid
import alsa_sink
import spotifyconnect
from flask.testsuite import catch_stderr
from spotifyconnect.error import LibError
class Connect:
def __init__(self, web_arg_parser = None):
if web_arg_parser:
arg_parser = argparse.ArgumentParser(description='Web interface for Spotify Connect', parents=[web_arg_parser], add_help=True)
else:
arg_parser = argparse.ArgumentParser(description='Web interface for Spotify Connect', add_help=True)
arg_parser.add_argument('--debug', '-d', help='enable libspotify_embedded/flask debug output', action="store_true")
arg_parser.add_argument('--key', '-k', help='path to spotify_appkey.key (can be obtained from https://developer.spotify.com/my-account/keys )', default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'spotify_appkey.key'))
arg_parser.add_argument('--username', '-u', help='your spotify username')
arg_parser.add_argument('--password', '-p', help='your spotify password')
arg_parser.add_argument('--name', '-n', help='name that shows up in the spotify client', default='TestConnect')
arg_parser.add_argument('--bitrate', '-b', help='Sets bitrate of alsa_sink stream (may not actually work)', choices=[90, 160, 320], type=int, default=160)
arg_parser.add_argument('--credentials', '-c', help='File to load and save credentials from/to', default='credentials.json')
arg_parser.add_argument('--device', '-D', help='alsa output device', default='default')
arg_parser.add_argument('--mixer', '-m', help='alsa mixer name for volume control', default='')
arg_parser.add_argument('--volmin', '-v', help='minimum mixer volume (percentage)', metavar='{0-99}', choices=xrange(0, 100), type=int, default=0)
arg_parser.add_argument('--volmax', '-V', help='maximum mixer volume (percentage)', metavar='{1-100}', choices=xrange(1, 101), type=int, default=100)
self.args = arg_parser.parse_args()
self.credentials = dict({
'device-id': str(uuid.uuid4()),
'username': None,
'blob': None
})
try:
with open(self.args.credentials) as f:
self.credentials.update(
{ k: v.encode('utf-8') if isinstance(v, unicode) else v
for (k, v)
in json.loads(f.read()).iteritems() })
except IOError:
pass
if self.args.username:
self.credentials['username'] = self.args.username
self.config = spotifyconnect.Config()
try:
self.config.load_application_key_file(self.args.key)
except IOError as e:
print "Error opening app key: {}.".format(e)
print "If you don't have one, it can be obtained from https://developer.spotify.com/my-account/keys"
sys.exit(1)
self.config.device_id = self.credentials['device-id']
self.config.remote_name = self.args.name
try:
self.session = spotifyconnect.Session(self.config)
except spotifyconnect.LibError as error:
print "New spotify-connect session failed:", error.message
print "Exiting."
sys.exit(1)
# Connection object, callbacks and events
self.session.connection.on(spotifyconnect.ConnectionEvent.CONNECTION_NOTIFY_UPDATED, self.connection_notify)
self.session.connection.on(spotifyconnect.ConnectionEvent.NEW_CREDENTIALS, self.connection_new_credentials)
if self.args.debug:
self.session.connection.on(spotifyconnect.DebugEvent.DEBUG_MESSAGE, self.debug_message)
self.session.player.on(spotifyconnect.PlayerEvent.PLAYBACK_NOTIFY, self.playback_notify)
self.session.player.on(spotifyconnect.PlayerEvent.PLAYBACK_SEEK, self.playback_seek)
self.audio_player = alsa_sink.AlsaSink(self.args.device)
self.audio_player.mixer_load()
self.session.player.on(spotifyconnect.PlayerEvent.PLAYBACK_VOLUME, self.volume_set)
mixer_volume = self.audio_player.volume_get()
self.session.player.volume = mixer_volume
if self.args.bitrate == 90:
bitrate = spotifyconnect.Bitrate.BITRATE_90k
elif self.args.bitrate == 160:
bitrate = spotifyconnect.Bitrate.BITRATE_160k
elif self.args.bitrate == 320:
bitrate = spotifyconnect.Bitrate.BITRATE_320k
self.session.player.set_bitrate(bitrate)
self.print_zeroconf_vars(self.session.get_zeroconf_vars())
if self.credentials['username'] and self.args.password:
self.session.connection.login(self.credentials['username'], password=self.args.password)
elif self.credentials['username'] and self.credentials['blob']:
self.session.connection.login(self.credentials['username'], blob=self.credentials['blob'])
self.playback_session = PlaybackSession()
self.event_loop = spotifyconnect.EventLoop(self.session)
self.event_loop.start()
signal.signal(signal.SIGINT, self.signal_handler)
signal.signal(signal.SIGTERM, self.signal_handler)
# Connection callbacks
def connection_notify(self, notify, session):
print notify._name
def connection_new_credentials(self, blob, session):
print blob
self.credentials['blob'] = blob
zeroconf = session.get_zeroconf_vars()
self.credentials['username'] = zeroconf.active_user
with open(self.args.credentials, 'w') as f:
f.write(json.dumps(self.credentials))
# Debug callbacks
def debug_message(self, msg, session):
print msg
# Playback callbacks
def playback_notify(self, notify, session):
# TODO: Check that device is active
if notify == spotifyconnect.PlaybackNotify.Play:
print "kSpPlaybackNotifyPlay"
if self.playback_session.active:
if not self.audio_player.acquired():
try:
self.audio_player.acquire()
print "DeviceAcquired"
self.audio_player.play()
except alsa_sink.PlayerError as error:
print error
session.player.pause()
else:
self.audio_player.play()
elif notify == spotifyconnect.PlaybackNotify.Pause:
print "kSpPlaybackNotifyPause"
if self.audio_player.playing():
self.audio_player.pause()
self.audio_player.release()
print "DeviceReleased"
elif notify == spotifyconnect.PlaybackNotify.TrackChanged:
print "kSpPlaybackNotifyTrackChanged"
elif notify == spotifyconnect.PlaybackNotify.Next:
print "kSpPlaybackNotifyNext"
elif notify == spotifyconnect.PlaybackNotify.Prev:
print "kSpPlaybackNotifyPrev"
elif notify == spotifyconnect.PlaybackNotify.ShuffleEnabled:
print "kSpPlaybackNotifyShuffleEnabled"
elif notify == spotifyconnect.PlaybackNotify.ShuffleDisabled:
print "kSpPlaybackNotifyShuffleDisabled"
elif notify == spotifyconnect.PlaybackNotify.RepeatEnabled:
print "kSpPlaybackNotifyRepeatEnabled"
elif notify == spotifyconnect.PlaybackNotify.RepeatDisabled:
print "kSpPlaybackNotifyRepeatDisabled"
elif notify == spotifyconnect.PlaybackNotify.BecameActive:
print "kSpPlaybackNotifyBecameActive"
self.playback_session.activate()
elif notify == spotifyconnect.PlaybackNotify.BecameInactive:
print "kSpPlaybackNotifyBecameInactive"
self.playback_session.deactivate()
if self.audio_player.acquired():
self.audio_player.pause()
self.audio_player.release()
print "DeviceReleased"
elif notify == spotifyconnect.PlaybackNotify.PlayTokenLost:
print "kSpPlaybackNotifyPlayTokenLost"
elif notify == spotifyconnect.PlaybackNotify.AudioFlush:
print "kSpPlaybackEventAudioFlush"
if self.audio_player.acquired():
self.audio_player.buffer_flush()
else:
print "UNKNOWN PlaybackNotify {}".format(notify)
def volume_set(self, volume, session):
print "volume: {}".format(volume)
self.audio_player.volume_set(volume)
def playback_seek(self, millis, session):
print "playback_seek: {}".format(millis)
def signal_handler(self, signal, frame):
self.event_loop.stop()
self.session.connection.logout()
self.session.free_session()
sys.exit(0)
def print_zeroconf_vars(self, zeroconf_vars):
print "public key: {}".format(zeroconf_vars.public_key)
print "device id: {}".format(zeroconf_vars.device_id)
print "remote name: {}".format(zeroconf_vars.remote_name)
print "account req: {}".format(zeroconf_vars.account_req)
print "device type: {}".format(zeroconf_vars.device_type)
class PlaybackSession:
def __init__(self):
self._active = False
@property
def active(self):
return self._active
def activate(self):
self._active = True
def deactivate(self):
self._active = False
# First run the command avahi-publish-service TestConnect _spotify-connect._tcp 4000 VERSION=1.0 CPath=/login/_zeroconf
# Only run if script is run directly and not by an import
if __name__ == "__main__":
connect = Connect()
zeroconfserver = spotifyconnect.AvahiZeroConfServer(4000)
zeroconfserver.run()
while True:
sleep(5)
|
{
"content_hash": "931a7dc5467f78b5725ba70b5506d3f7",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 240,
"avg_line_length": 42.47639484978541,
"alnum_prop": 0.637566939476609,
"repo_name": "chukysoria/spotify-connect-web",
"id": "e81223a902d985079f99a4418a05497e66da003f",
"size": "9920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "connect_console.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "7252"
},
{
"name": "JavaScript",
"bytes": "6186"
},
{
"name": "Python",
"bytes": "23183"
},
{
"name": "Shell",
"bytes": "1949"
}
],
"symlink_target": ""
}
|
import sys
import os
import shlex
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'Moto'
copyright = '2015, Steve Pulec'
author = 'Steve Pulec'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.4.10'
# The full version, including alpha/beta/rc tags.
release = '0.4.10'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'Motodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'Moto.tex', 'Moto Documentation',
'Steve Pulec', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'moto', 'Moto Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'Moto', 'Moto Documentation',
author, 'Moto', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
{
"content_hash": "f18e4258a8bdbfa3cf55b37caba05a81",
"timestamp": "",
"source": "github",
"line_count": 270,
"max_line_length": 79,
"avg_line_length": 32.27777777777778,
"alnum_prop": 0.7039586919104991,
"repo_name": "kefo/moto",
"id": "28a4b4e6bd1f0825b1c323fa28842c33aa236cbf",
"size": "9155",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "5848"
},
{
"name": "Java",
"bytes": "1688"
},
{
"name": "JavaScript",
"bytes": "756"
},
{
"name": "Makefile",
"bytes": "712"
},
{
"name": "Python",
"bytes": "2996908"
},
{
"name": "Ruby",
"bytes": "188"
}
],
"symlink_target": ""
}
|
import json
import click
from tabulate import tabulate
@click.command('groups', short_help='List user groups')
@click.pass_obj
def cli(obj):
"""List groups."""
client = obj['client']
if obj['output'] == 'json':
r = client.http.get('/groups')
click.echo(json.dumps(r['groups'], sort_keys=True, indent=4, ensure_ascii=False))
else:
headers = {'id': 'ID', 'name': 'NAME', 'count': 'USERS', 'text': 'DESCRIPTION'}
click.echo(tabulate([g.tabular() for g in client.get_users_groups()], headers=headers, tablefmt=obj['output']))
|
{
"content_hash": "24e8cb200e5518eba628f633192ec497",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 119,
"avg_line_length": 31.88888888888889,
"alnum_prop": 0.6254355400696864,
"repo_name": "alerta/python-alerta",
"id": "61348e2955595d4a56e8804c4d20625ed6669c93",
"size": "574",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "alertaclient/commands/cmd_groups.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "106941"
}
],
"symlink_target": ""
}
|
"""UI view definitions."""
import json
from datetime import timedelta
from logging import getLogger
from django.conf import settings
from django.contrib.auth import authenticate, login
from django.contrib.auth import logout as auth_logout
from django.urls import reverse
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.template.context_processors import csrf
from django.views.decorators.http import require_http_methods
from rest_framework.renderers import JSONRenderer
from eventkit_cloud.api.serializers import UserDataSerializer
from eventkit_cloud.ui.helpers import (
file_to_geojson,
set_session_user_last_active_at,
is_mgrs,
is_lat_lon,
write_uploaded_file,
)
from eventkit_cloud.utils.geocoding.coordinate_converter import CoordinateConverter
from eventkit_cloud.utils.geocoding.geocode import Geocode
from eventkit_cloud.utils.geocoding.reverse import ReverseGeocode
logger = getLogger(__file__)
@require_http_methods(["GET"])
def create_export(request):
"""
Handles display of the create export page.
"""
user = request.user
max_extent = {"extent": settings.JOB_MAX_EXTENT}
for group in user.groups.all():
if hasattr(group, "export_profile"):
max_extent["extent"] = group.export_profile.max_extent
extent = max_extent.get("extent")
context = {"user": user, "max_extent": extent}
context.update(csrf(request))
return render(request, "ui/create.html", context)
# @user_verification_required
@require_http_methods(["GET"])
def clone_export(request, uuid=None):
"""
Handles display of the clone export page.
"""
max_extent = {"extent": settings.JOB_MAX_EXTENT} # default
user = request.user
for group in user.groups.all():
if hasattr(group, "export_profile"):
max_extent["extent"] = group.export_profile.max_extent
extent = max_extent.get("extent")
context = {"user": user, "max_extent": extent}
context.update(csrf(request))
return render(request, "ui/clone.html", context)
# @user_verification_required
@require_http_methods(["GET"])
def view_export(request, uuid=None): # NOQA
"""
Handles display of the clone export page.
"""
user = request.user
context = {"user": user}
return render(request, "ui/detail.html", context)
def auth(request):
if (request.method == "GET") and request.user.is_authenticated:
# If the user is already authenticated we want to return the user data (required for oauth).
return HttpResponse(
JSONRenderer().render(UserDataSerializer(request.user, context={"request": request}).data),
content_type="application/json",
status=200,
)
elif getattr(settings, "LDAP_SERVER_URI", getattr(settings, "DJANGO_MODEL_LOGIN")):
if request.method == "POST":
"""Logs out user"""
auth_logout(request)
username = request.POST.get("username")
password = request.POST.get("password")
user_data = authenticate(username=username, password=password)
if user_data is None:
return HttpResponse(status=401)
else:
login(request, user_data)
set_session_user_last_active_at(request)
return HttpResponse(
JSONRenderer().render(UserDataSerializer(user_data, context={"request": request}).data),
content_type="application/json",
status=200,
)
if request.method == "GET":
# We want to return a 200 so that the frontend can decide if the auth endpoint is valid for displaying the
# the login form.
return HttpResponse(status=200)
else:
return HttpResponse(status=400)
def logout(request):
"""Logs out user"""
auth_logout(request)
response = redirect("login")
if settings.SESSION_USER_LAST_ACTIVE_AT in request.session:
del request.session[settings.SESSION_USER_LAST_ACTIVE_AT]
response.delete_cookie(settings.AUTO_LOGOUT_COOKIE_NAME, domain=settings.SESSION_COOKIE_DOMAIN)
return response
def require_email(request):
"""
View to handle email collection for new user log in with OSM account.
"""
backend = request.session["partial_pipeline"]["backend"]
return render(request, "osm/email.html", {"backend": backend})
@require_http_methods(["GET"])
def search(request):
"""
Detects the query type and calls the relevant geocoder to get results
:param request: User request which should include a query parameter
:return: A geojson with features matching the search query
"""
q = request.GET.get("query", None)
if not q:
return HttpResponse(status=204, content_type="application/json")
error_string = "An unknown error occurred while querying for results, please contact an administrator."
degree_range = 0.05
if is_mgrs(q):
# check for necessary settings
if getattr(settings, "CONVERT_API_URL") is None:
return HttpResponse("No Convert API specified", status=501)
if getattr(settings, "REVERSE_GEOCODING_API_URL") is None:
return HttpResponse("No Reverse Geocode API specified", status=501)
# make call to convert which should return a geojson feature of the MGRS location
convert = CoordinateConverter()
try:
mgrs_data = convert.get(q)
except Exception as e:
logger.error(e)
return HttpResponse(content=error_string, status=500)
# if no feature geom return nothing
if not mgrs_data or not mgrs_data.get("geometry"):
return HttpResponse(status=204, content_type="application/json")
features = []
# save the mgrs feature to return later
if not mgrs_data.get("properties"):
mgrs_data["properties"] = {}
mgrs_data["properties"]["bbox"] = [
mgrs_data.get("geometry").get("coordinates")[0] - degree_range,
mgrs_data.get("geometry").get("coordinates")[1] - degree_range,
mgrs_data.get("geometry").get("coordinates")[0] + degree_range,
mgrs_data.get("geometry").get("coordinates")[1] + degree_range,
]
mgrs_data["source"] = "MGRS"
features.append(mgrs_data)
# call reverse to get a list of results near the mgrs feature
reverse = ReverseGeocode()
try:
result = reverse.search(
{
"lat": mgrs_data.get("geometry").get("coordinates")[1],
"lon": mgrs_data.get("geometry").get("coordinates")[0],
}
)
except Exception as e:
logger.error(e)
return HttpResponse(content=error_string, status=500)
if result.get("features"):
# add the mgrs feature with the search results and return together
result["features"] = features + result["features"]
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
# if no results just return the MGRS feature in the response
return HttpResponse(content=json.dumps({"features": features}), status=200, content_type="application/json")
elif is_lat_lon(q):
coords = is_lat_lon(q)
# if no reverse url return 501
if getattr(settings, "REVERSE_GEOCODING_API_URL") is None:
return HttpResponse("No Reverse Geocode API specified", status=501)
# make call to reverse geocode
reverse = ReverseGeocode()
try:
result = reverse.search({"lat": coords[0], "lon": coords[1]})
except Exception:
return HttpResponse(content=error_string, status=500)
# create a feature representing the exact lat/lon being searched
point_feature = {
"geometry": {"type": "Point", "coordinates": [coords[1], coords[0]]},
"source": "Coordinate",
"type": "Feature",
"properties": {
"name": "{0} {1}, {2} {3}".format(
coords[0] if coords[0] >= 0 else coords[0] * -1,
"N" if coords[0] >= 0 else "S",
coords[1] if coords[1] >= 0 else coords[1] * -1,
"E" if coords[1] >= 0 else "W",
),
"bbox": [
coords[1] - degree_range,
coords[0] - degree_range,
coords[1] + degree_range,
coords[0] + degree_range,
],
},
}
# if there are results add the point feature and return them together
if result.get("features"):
result.get("features").insert(0, point_feature)
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
# if there are no results return only the point feature
features = {"features": [point_feature]}
return HttpResponse(content=json.dumps(features), status=200, content_type="application/json")
else:
# make call to geocode with search
geocode = Geocode()
try:
result = geocode.search(q)
except Exception as e:
logger.error(e)
return HttpResponse(content=error_string, status=500)
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
@require_http_methods(["GET"])
def geocode(request):
geocode = Geocode()
if request.GET.get("search"):
result = geocode.search(request.GET.get("search"))
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
if request.GET.get("result"):
result = geocode.add_bbox(json.loads(request.GET.get("result")))
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
else:
return HttpResponse(status=204, content_type="application/json")
@require_http_methods(["GET"])
def convert(request):
convert = CoordinateConverter()
if getattr(settings, "CONVERT_API_URL") is not None:
if request.GET.get("convert"):
result = convert.get(request.GET.get("convert"))
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
else:
return HttpResponse(status=204, content_type="application/json")
else:
return HttpResponse("No Convert API specified", status=501)
@require_http_methods(["GET"])
def reverse_geocode(request):
reverseGeocode = ReverseGeocode()
if getattr(settings, "REVERSE_GEOCODING_API_URL") is not None:
if request.GET.get("lat") and request.GET.get("lon"):
result = reverseGeocode.search({"lat": request.GET.get("lat"), "lon": request.GET.get("lon")})
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
if request.GET.get("result"):
result = reverseGeocode.add_bbox(json.loads(request.GET.get("result")))
return HttpResponse(content=json.dumps(result), status=200, content_type="application/json")
else:
return HttpResponse(status=204, content_type="application/json")
else:
return HttpResponse("No Reverse Geocode API specified", status=501)
@require_http_methods(["GET"])
def about(request):
exports_url = reverse("list")
help_url = reverse("help")
return render(request, "ui/about.html", {"exports_url": exports_url, "help_url": help_url})
@require_http_methods(["GET"])
def help_main(request):
return render(request, "help/help.html", {})
@require_http_methods(["GET"])
def help_create(request):
create_url = reverse("create")
help_features_url = reverse("help_features")
return render(request, "help/help_create.html", {"create_url": create_url, "help_features_url": help_features_url})
@require_http_methods(["GET"])
def help_features(request):
return render(request, "help/help_features.html", {})
@require_http_methods(["GET"])
def help_exports(request):
export_url = reverse("list")
return render(request, "help/help_exports.html", {"export_url": export_url})
@require_http_methods(["GET"])
def help_formats(request):
return render(request, "help/help_formats.html", {})
@require_http_methods(["GET"])
def help_presets(request):
configurations_url = reverse("configurations")
return render(request, "help/help_presets.html", {"configurations_url": configurations_url})
@require_http_methods(["GET"])
def get_config(request):
"""
:param request: a GET request
:return: a dict of available configurations
"""
config = getattr(settings, "UI_CONFIG", {})
return HttpResponse(json.dumps(config), content_type="application/json", status=200)
@require_http_methods(["POST"])
def convert_to_geojson(request):
in_memory_file = request.FILES.get("file", None)
if not in_memory_file:
return HttpResponse("No file supplied in the POST request", status=400)
try:
output_file = write_uploaded_file(in_memory_file)
geojson = file_to_geojson(output_file)
return HttpResponse(json.dumps(geojson), content_type="application/json", status=200)
except Exception as e:
logger.error(e)
return HttpResponse(str(e), status=400)
def user_active(request):
"""Prevents auto logout by updating the session's last active time"""
# If auto logout is disabled, just return an empty body.
if not settings.AUTO_LOGOUT_SECONDS:
return HttpResponse(json.dumps({}), content_type="application/json", status=200)
last_active_at = set_session_user_last_active_at(request)
auto_logout_at = last_active_at + timedelta(seconds=settings.AUTO_LOGOUT_SECONDS)
auto_logout_warning_at = auto_logout_at - timedelta(seconds=settings.AUTO_LOGOUT_WARNING_AT_SECONDS_LEFT)
return HttpResponse(
json.dumps(
{
"auto_logout_at": auto_logout_at.isoformat(),
"auto_logout_warning_at": auto_logout_warning_at.isoformat(),
}
),
content_type="application/json",
status=200,
)
# error views
@require_http_methods(["GET"])
def create_error_view(request):
return render(request, "ui/error.html", {}, status=500)
def internal_error_view(request):
return render(request, "ui/500.html", {}, status=500)
def not_found_error_view(request):
return render(request, "ui/404.html", {}, status=404)
def not_allowed_error_view(request):
return render(request, "ui/403.html", {}, status=403)
|
{
"content_hash": "41f36edc80a78233c4244e30041f3d2a",
"timestamp": "",
"source": "github",
"line_count": 386,
"max_line_length": 119,
"avg_line_length": 38.27461139896373,
"alnum_prop": 0.6396372004873426,
"repo_name": "venicegeo/eventkit-cloud",
"id": "736c49b7067cf905a138628c6b044e7b0e359356",
"size": "14798",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "eventkit_cloud/ui/views.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "90420"
},
{
"name": "Dockerfile",
"bytes": "2466"
},
{
"name": "HTML",
"bytes": "85741"
},
{
"name": "Java",
"bytes": "123740"
},
{
"name": "JavaScript",
"bytes": "597810"
},
{
"name": "Python",
"bytes": "1145801"
},
{
"name": "Shell",
"bytes": "6127"
},
{
"name": "TypeScript",
"bytes": "1456680"
}
],
"symlink_target": ""
}
|
import numpy as np
def get_num_edges_meeting(neighbour_matrix, orig_vertex_id):
return np.sum(neighbour_matrix[orig_vertex_id,:,0] != -1)
def checkB1B2Reversal_opt(B1,quad_list,quad_index,vertex_index,regularPoints):
quadNumberLocal = np.where(B1[vertex_index,:,1] == quad_index)[0][0]
B1fromList = B1[vertex_index,quadNumberLocal,0]
thisQuad_cornerVertices = quad_list[quad_index,:]
whichQuadCorner = np.where(thisQuad_cornerVertices == vertex_index)[0][0]
localB1s = np.array([2,8,15,9])-1
localB2s = np.array([5,3,12,14])-1
shouldBeB1 = regularPoints[quad_index,localB1s[whichQuadCorner]]
shouldBeB2 = regularPoints[quad_index,localB2s[whichQuadCorner]]
"""
in peter's scheme, and therefore in the parameters, B1 is the left one if
looking into the quad corner. Since the quad corners go clockwise, that
means the relevant quad corner and the one after should be
shouldBeB1Edge = thisQuad_cornerVertices([whichQuadCorner,mod_index(whichQuadCorner+1,4)]);
shouldBeB2Edge = thisQuad_cornerVertices([whichQuadCorner,mod_index(whichQuadCorner-1,4)]);
"""
if B1fromList == shouldBeB1:
isReversed = False
elif shouldBeB2 == B1fromList:
isReversed = True
else:
print 'quad_index: '
print quad_index
print 'vertex_index'
print vertex_index
raise Exception('ERROR! Something wrong with the edges around the quads! Function checkB1B2Reversal')
return isReversed
def checkB1B2OrientationReversal(B1,B2,quad_list,quad_index,vertex_index):
"""
check if the B1 in the quad to the right lies along the same edge as the
%B2 in the current quad.
:param B1:
:param B2:
:param quad_list:
:param quad_index:
:param vertex_index:
:return: a Bool?
"""
mod_index = lambda i, modul: (i)%modul
B1s_this_vertex = np.reshape(B1[vertex_index,:,:],[B1.shape[1:3]])
B2s_this_vertex = np.reshape(B2[vertex_index,:,:],[B2.shape[1:3]])
numberOfEdges = get_num_edges_meeting(B1, vertex_index)
quadNumberLocal = np.where(B1[vertex_index,:,1] == quad_index)[0][0]
B1EdgeFromB1 = np.reshape(B1[vertex_index,quadNumberLocal,2:4],[1,2])
shouldBeSameAsB1Edge = np.reshape(B2[vertex_index,mod_index(quadNumberLocal - 1,numberOfEdges),2:4],[1,2])
isB1IfReversed = np.reshape(B2[vertex_index,mod_index(quadNumberLocal + 1,numberOfEdges),2:4],[1,2])
B2EdgeFromB2 = np.reshape(B2[vertex_index,quadNumberLocal,2:4],[1,2])
shouldBeSameAsB2Edge = np.reshape(B1[vertex_index,mod_index(quadNumberLocal + 1,numberOfEdges),2:4],[1,2])
isB2IfReversed = np.reshape(B1[vertex_index,mod_index(quadNumberLocal - 1,numberOfEdges),2:4],[1,2])
thisQuad_cornerVertices = quad_list[quad_index,:]
whichQuadCorner = np.where(thisQuad_cornerVertices == vertex_index)[0][0]
"""
in peter's scheme, and therefore in the parameters, B1 is the left one if
looking into the quad corner. Since the quad corners go clockwise, that
means the relevant quad corner and the one after should be
"""
shouldBeB1Edge = thisQuad_cornerVertices[[whichQuadCorner,mod_index(whichQuadCorner+1,4)]]
shouldBeB2Edge = thisQuad_cornerVertices[[whichQuadCorner,mod_index(whichQuadCorner-1,4)]]
if len(np.intersect1d(B1EdgeFromB1,shouldBeB1Edge)) == 2 and \
len(np.intersect1d(B2EdgeFromB2,shouldBeB2Edge)) == 2:
if len(np.intersect1d(B1EdgeFromB1,shouldBeSameAsB1Edge)) == 2 and \
len(np.intersect1d(B2EdgeFromB2,shouldBeSameAsB2Edge)) == 2:
isNotCounterClockwise = False
elif len(np.intersect1d(B1EdgeFromB1,isB1IfReversed)) == 2 and \
len(np.intersect1d(B2EdgeFromB2,isB2IfReversed)) == 2:
isNotCounterClockwise = True
else:
raise Exception('something is seriously wrong because the edges don`t add upp. '
'in checkB1B2OrientationReversal')
elif len(np.intersect1d(B1EdgeFromB1,shouldBeB2Edge)) == 2:
raise Exception('I didn`t write this function and the other one for you to be lazy '
'and not use the other to check for reversed b1b2')
else:
print 'quad_index: '
print quad_index
print 'vertex_index'
print vertex_index
raise Exception('ERROR! Something wrong with the edges around the quads! '
'Function checkB1B2OrientationReversal')
return isNotCounterClockwise
|
{
"content_hash": "871d32807202acf2c5ce96edce5584d5",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 110,
"avg_line_length": 42.490566037735846,
"alnum_prop": 0.683392539964476,
"repo_name": "BGCECSE2015/CADO",
"id": "9f9ed62b7afec838543e7762b928f7264943cf0b",
"size": "4504",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PYTHON/NURBSReconstruction/PetersScheme/helper_functions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "92113"
},
{
"name": "CMake",
"bytes": "1257"
},
{
"name": "Makefile",
"bytes": "554"
},
{
"name": "Python",
"bytes": "208324"
},
{
"name": "QMake",
"bytes": "514"
},
{
"name": "Shell",
"bytes": "479"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from celery.utils.serialization import pickle
from celery.tests.case import Case
class RegularException(Exception):
pass
class ArgOverrideException(Exception):
def __init__(self, message, status_code=10):
self.status_code = status_code
Exception.__init__(self, message, status_code)
class test_Pickle(Case):
def test_pickle_regular_exception(self):
exc = None
try:
raise RegularException('RegularException raised')
except RegularException as exc_:
exc = exc_
pickled = pickle.dumps({'exception': exc})
unpickled = pickle.loads(pickled)
exception = unpickled.get('exception')
self.assertTrue(exception)
self.assertIsInstance(exception, RegularException)
self.assertTupleEqual(exception.args, ('RegularException raised', ))
def test_pickle_arg_override_exception(self):
exc = None
try:
raise ArgOverrideException(
'ArgOverrideException raised', status_code=100,
)
except ArgOverrideException as exc_:
exc = exc_
pickled = pickle.dumps({'exception': exc})
unpickled = pickle.loads(pickled)
exception = unpickled.get('exception')
self.assertTrue(exception)
self.assertIsInstance(exception, ArgOverrideException)
self.assertTupleEqual(exception.args, (
'ArgOverrideException raised', 100))
self.assertEqual(exception.status_code, 100)
|
{
"content_hash": "9ca8c83d870bc548d21b9fe1f5ac670a",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 30.823529411764707,
"alnum_prop": 0.6418575063613231,
"repo_name": "sunze/py_flask",
"id": "6b65bb3c55f3ea4cddd741dbd467a068bce5df55",
"size": "1572",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "venv/lib/python3.4/site-packages/celery/tests/utils/test_pickle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "11745"
},
{
"name": "HTML",
"bytes": "34870"
},
{
"name": "JavaScript",
"bytes": "23176"
},
{
"name": "Mako",
"bytes": "7564"
},
{
"name": "Python",
"bytes": "12266826"
},
{
"name": "Shell",
"bytes": "3634"
}
],
"symlink_target": ""
}
|
"""\
=====================================
Timing the speed of primes algorithms
=====================================
"""
from __future__ import division
import sys
from itertools import islice
# Conditionally hack the PYTHONPATH.
if __name__ == '__main__':
import os
path = os.path.dirname(__file__)
parent, here = os.path.split(path)
sys.path.append(parent)
from pyprimes.compat23 import next
import pyprimes.awful as awful
import pyprimes.probabilistic as probabilistic
import pyprimes.sieves as sieves
YEAR100 = 100*365*24*60*60 # One hundred years, in seconds.
class Stopwatch(object):
def __init__(self, timer=None):
if timer is None:
from timeit import default_timer as timer
self.timer = timer
self.reset()
def reset(self):
"""Reset all the collected timer results."""
try:
del self._start
except AttributeError:
pass
self._elapsed = 0.0
def start(self):
"""Start the timer."""
self._start = self.timer()
def stop(self):
"""Stop the timer."""
t = self.timer()
self._elapsed = t - self._start
del self._start
@property
def elapsed(self):
return self._elapsed
def trial(generator, count, repeat=1):
timer = Stopwatch()
best = YEAR100
for i in range(repeat):
it = generator()
timer.reset()
timer.start()
# Go to the count-th prime as fast as possible.
p = next(islice(it, count-1, count))
timer.stop()
best = min(best, timer.elapsed)
return best
def run(generators, number, repeat=1):
print ("Calculating speeds for first %d primes..." % number)
template = "\r ...%d of %d %s"
heading = """\
Generator Elapsed Speed
(sec) (primes/sec)
=============================================================="""
records = []
timer = Stopwatch() # For measuring the total elapsed time.
timer.start()
N = len(generators)
for i, generator in enumerate(generators):
name = generator.__module__ + '.' + generator.__name__
sys.stdout.write((template % (i+1, N, name)).ljust(69))
sys.stdout.flush()
t = trial(generator, number, repeat)
records.append((number/t, t, name))
timer.stop()
sys.stdout.write("\r%-69s\n" % "Done!")
print ('Total elapsed time: %.1f seconds' % timer.elapsed)
print ('')
records.sort()
print (heading)
for speed, elapsed, name in records:
print ("%-36s %4.2f %8.1f" % (name, elapsed, speed))
print ('==============================================================\n')
VERY_SLOW = [awful.primes0, awful.primes1, awful.primes2, awful.turner]
SLOW = [awful.primes3, awful.primes4, probabilistic.primes]
FAST = [sieves.cookbook, sieves.croft, sieves.sieve, sieves.wheel]
MOST = SLOW + FAST
ALL = VERY_SLOW + MOST
run(VERY_SLOW + SLOW, 1000)
run([awful.primes3, awful.trial_division], 5000)
#run([awful.primes3, awful.trial_division], 50000)
#run([awful.primes3, awful.trial_division], 100000)
#run([awful.primes3, awful.trial_division], 200000)
exit()
run(ALL, 500, 3)
run(MOST, 10000)
run(FAST, 1000000)
"""
Python 2.6 or better
import multiprocessing
import time
# bar
def bar():
for i in range(100):
print "Tick"
time.sleep(1)
if __name__ == '__main__':
# Start bar as a process
p = multiprocessing.Process(target=bar)
p.start()
# Wait for 10 seconds or until process finishes
p.join(10)
# If thread is still active
if p.is_alive():
print "running... let's kill it..."
# Terminate
p.terminate()
p.join()
"""
"""
Unix only, Python 2.5 or better.
In [1]: import signal
# Register an handler for the timeout
In [2]: def handler(signum, frame):
...: print "Forever is over!"
...: raise Exception("end of time")
...:
# This function *may* run for an indetermined time...
In [3]: def loop_forever():
...: import time
...: while 1:
...: print "sec"
...: time.sleep(1)
...:
...:
# Register the signal function handler
In [4]: signal.signal(signal.SIGALRM, handler)
Out[4]: 0
# Define a timeout for your function
In [5]: signal.alarm(10)
Out[5]: 0
In [6]: try:
...: loop_forever()
...: except Exception, exc:
...: print exc
....:
sec
sec
sec
sec
sec
sec
sec
sec
Forever is over!
end of time
# Cancel the timer if the function returned before timeout
# (ok, mine won't but yours maybe will :)
In [7]: signal.alarm(0)
Out[7]: 0
"""
|
{
"content_hash": "b5fcda01e82f836de902cfc56aa3db6f",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 78,
"avg_line_length": 22.260663507109005,
"alnum_prop": 0.5673834362358953,
"repo_name": "uzumaxy/pyprimes",
"id": "921797e1106c1cc1de15d3673e4c12275b8512b5",
"size": "4872",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/pyprimes/speed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "109073"
}
],
"symlink_target": ""
}
|
"""Common utility functions."""
import base64
import datetime
import hashlib
import json
import os
import random
import re
import string
import StringIO
import time
import unicodedata
import urllib
import urlparse
import zipfile
import yaml
import feconf # pylint: disable=relative-import
class InvalidInputException(Exception):
"""Error class for invalid input."""
pass
class ValidationError(Exception):
"""Error class for when a domain object fails validation."""
pass
class ExplorationConversionError(Exception):
"""Error class for when an exploration fails to convert from a certain
version to a certain version.
"""
pass
def create_enum(*sequential, **names):
enums = dict(zip(sequential, sequential), **names)
return type('Enum', (), enums)
def get_file_contents(filepath, raw_bytes=False, mode='r'):
"""Gets the contents of a file, given a relative filepath from oppia/."""
with open(filepath, mode) as f:
return f.read() if raw_bytes else f.read().decode('utf-8')
def get_exploration_components_from_dir(dir_path):
"""Gets the (yaml, assets) from the contents of an exploration data dir.
Args:
dir_path: a full path to the exploration root directory.
Returns:
a 2-tuple, the first element of which is a yaml string, and the second
element of which is a list of (filepath, content) 2-tuples. The filepath
does not include the assets/ prefix.
Raises:
Exception: if the following condition doesn't hold: "There is exactly one
file not in assets/, and this file has a .yaml suffix".
"""
yaml_content = None
assets_list = []
dir_path_array = dir_path.split('/')
while dir_path_array[-1] == '':
dir_path_array = dir_path_array[:-1]
dir_path_length = len(dir_path_array)
for root, dirs, files in os.walk(dir_path):
for directory in dirs:
if root == dir_path and directory != 'assets':
raise Exception(
'The only directory in %s should be assets/' % dir_path)
for filename in files:
filepath = os.path.join(root, filename)
if root == dir_path:
if filepath.endswith('.DS_Store'):
# These files are added automatically by Mac OS Xsystems.
# We ignore them.
continue
if yaml_content is not None:
raise Exception('More than one non-asset file specified '
'for %s' % dir_path)
elif not filepath.endswith('.yaml'):
raise Exception('Found invalid non-asset file %s. There '
'should only be a single non-asset file, '
'and it should have a .yaml suffix.' %
filepath)
else:
yaml_content = get_file_contents(filepath)
else:
filepath_array = filepath.split('/')
# The additional offset is to remove the 'assets/' prefix.
filename = '/'.join(filepath_array[dir_path_length + 1:])
assets_list.append((filename, get_file_contents(
filepath, raw_bytes=True)))
if yaml_content is None:
raise Exception('No yaml file specifed for %s' % dir_path)
return yaml_content, assets_list
def get_exploration_components_from_zip(zip_file_contents):
"""Gets the (yaml, assets) from the contents of an exploration zip file.
Args:
zip_file_contents: a string of raw bytes representing the contents of
a zip file that comprises the exploration.
Returns:
a 2-tuple, the first element of which is a yaml string, and the second
element of which is a list of (filepath, content) 2-tuples. The filepath
does not include the assets/ prefix.
Raises:
Exception: if the following condition doesn't hold: "There is exactly one
file not in assets/, and this file has a .yaml suffix".
"""
memfile = StringIO.StringIO()
memfile.write(zip_file_contents)
zf = zipfile.ZipFile(memfile, 'r')
yaml_content = None
assets_list = []
for filepath in zf.namelist():
if filepath.startswith('assets/'):
assets_list.append('/'.join(filepath.split('/')[1:]),
zf.read(filepath))
else:
if yaml_content is not None:
raise Exception(
'More than one non-asset file specified for zip file')
elif not filepath.endswith('.yaml'):
raise Exception('Found invalid non-asset file %s. There '
'should only be a single file not in assets/, '
'and it should have a .yaml suffix.' %
filepath)
else:
yaml_content = zf.read(filepath)
if yaml_content is None:
raise Exception('No yaml file specified in zip file contents')
return yaml_content, assets_list
def get_comma_sep_string_from_list(items):
"""Turns a list of items into a comma-separated string."""
if not items:
return ''
if len(items) == 1:
return items[0]
return '%s and %s' % (', '.join(items[:-1]), items[-1])
def to_ascii(input_string):
"""Change unicode characters in a string to ascii if possible."""
return unicodedata.normalize(
'NFKD', unicode(input_string)).encode('ascii', 'ignore')
def yaml_from_dict(dictionary, width=80):
"""Gets the YAML representation of a dict."""
return yaml.safe_dump(dictionary, default_flow_style=False, width=width)
def dict_from_yaml(yaml_str):
"""Gets the dict representation of a YAML string."""
try:
retrieved_dict = yaml.safe_load(yaml_str)
assert isinstance(retrieved_dict, dict)
return retrieved_dict
except yaml.YAMLError as e:
raise InvalidInputException(e)
def recursively_remove_key(obj, key_to_remove):
"""Recursively removes keys from a list or dict."""
if isinstance(obj, list):
for item in obj:
recursively_remove_key(item, key_to_remove)
elif isinstance(obj, dict):
if key_to_remove in obj:
del obj[key_to_remove]
for key, unused_value in obj.items():
recursively_remove_key(obj[key], key_to_remove)
def get_random_int(upper_bound):
"""Returns a random integer in [0, upper_bound)."""
assert upper_bound >= 0 and isinstance(upper_bound, int)
generator = random.SystemRandom()
return generator.randrange(0, upper_bound)
def get_random_choice(alist):
"""Gets a random element from a list."""
assert isinstance(alist, list) and len(alist) > 0
index = get_random_int(len(alist))
return alist[index]
def convert_png_to_data_url(filepath):
"""Converts the png file at filepath to a data URL.
This method is currently used only in tests for RTE extensions.
"""
file_contents = get_file_contents(filepath, raw_bytes=True, mode='rb')
return 'data:image/png;base64,%s' % urllib.quote(
file_contents.encode('base64'))
def camelcase_to_hyphenated(camelcase_str):
intermediate_str = re.sub('(.)([A-Z][a-z]+)', r'\1-\2', camelcase_str)
return re.sub('([a-z0-9])([A-Z])', r'\1-\2', intermediate_str).lower()
def set_url_query_parameter(url, param_name, param_value):
"""Set or replace a query parameter, and return the modified URL."""
if not isinstance(param_name, basestring):
raise Exception(
'URL query parameter name must be a string, received %s'
% param_name)
scheme, netloc, path, query_string, fragment = urlparse.urlsplit(url)
query_params = urlparse.parse_qs(query_string)
query_params[param_name] = [param_value]
new_query_string = urllib.urlencode(query_params, doseq=True)
return urlparse.urlunsplit(
(scheme, netloc, path, new_query_string, fragment))
class JSONEncoderForHTML(json.JSONEncoder):
"""Encodes JSON that is safe to embed in HTML."""
def encode(self, o):
chunks = self.iterencode(o, True)
return ''.join(chunks) if self.ensure_ascii else u''.join(chunks)
def iterencode(self, o, _one_shot=False):
chunks = super(JSONEncoderForHTML, self).iterencode(o, _one_shot)
for chunk in chunks:
yield chunk.replace('&', '\\u0026').replace(
'<', '\\u003c').replace('>', '\\u003e')
def convert_to_hash(input_string, max_length):
"""Convert a string to a SHA1 hash."""
if not isinstance(input_string, basestring):
raise Exception(
'Expected string, received %s of type %s' %
(input_string, type(input_string)))
encoded_string = base64.urlsafe_b64encode(
hashlib.sha1(input_string.encode('utf-8')).digest())
return encoded_string[:max_length]
def base64_from_int(value):
return base64.b64encode(bytes([value]))
def get_time_in_millisecs(datetime_obj):
"""Returns time in milliseconds since the Epoch.
Args:
datetime_obj: An object of type datetime.datetime.
"""
seconds = time.mktime(datetime_obj.timetuple()) * 1000
return seconds + datetime_obj.microsecond / 1000.0
def get_current_time_in_millisecs():
"""Returns time in milliseconds since the Epoch."""
return get_time_in_millisecs(datetime.datetime.utcnow())
def get_human_readable_time_string(time_msec):
"""Given a time in milliseconds since the epoch, get a human-readable
time string for the admin dashboard.
"""
return time.strftime('%B %d %H:%M:%S', time.gmtime(time_msec / 1000.0))
def generate_random_string(length):
return base64.urlsafe_b64encode(os.urandom(length))
def generate_new_session_id():
return generate_random_string(24)
def vfs_construct_path(base_path, *path_components):
"""Mimics behavior of os.path.join on Posix machines."""
path = base_path
for component in path_components:
if component.startswith('/'):
path = component
elif path == '' or path.endswith('/'):
path += component
else:
path += '/%s' % component
return path
def vfs_normpath(path):
"""Normalize path from posixpath.py, eliminating double slashes, etc."""
# Preserve unicode (if path is unicode)
slash, dot = (u'/', u'.') if isinstance(path, unicode) else ('/', '.')
if path == '':
return dot
initial_slashes = path.startswith('/')
# POSIX allows one or two initial slashes, but treats three or more
# as single slash.
if (initial_slashes and
path.startswith('//') and not path.startswith('///')):
initial_slashes = 2
comps = path.split('/')
new_comps = []
for comp in comps:
if comp in ('', '.'):
continue
if (comp != '..' or
(not initial_slashes and not new_comps) or
(new_comps and new_comps[-1] == '..')):
new_comps.append(comp)
elif new_comps:
new_comps.pop()
comps = new_comps
path = slash.join(comps)
if initial_slashes:
path = slash * initial_slashes + path
return path or dot
def require_valid_name(name, name_type):
"""Generic name validation.
Args:
name: the name to validate.
name_type: a human-readable string, like 'the exploration title' or
'a state name'. This will be shown in error messages.
"""
# This check is needed because state names are used in URLs and as ids
# for statistics, so the name length should be bounded above.
if len(name) > 50 or len(name) < 1:
raise ValidationError(
'The length of %s should be between 1 and 50 '
'characters; received %s' % (name_type, name))
if name[0] in string.whitespace or name[-1] in string.whitespace:
raise ValidationError(
'Names should not start or end with whitespace.')
if re.search(r'\s\s+', name):
raise ValidationError(
'Adjacent whitespace in %s should be collapsed.' % name_type)
for character in feconf.INVALID_NAME_CHARS:
if character in name:
raise ValidationError(
'Invalid character %s in %s: %s' %
(character, name_type, name))
def capitalize_string(input_string):
"""Converts the first character of a string to its uppercase equivalent (if
it's a letter), and returns the result.
"""
# This guards against empty strings.
if input_string:
return input_string[0].upper() + input_string[1:]
else:
return input_string
def get_info_card_url_for_category(category):
info_card_color = (
feconf.CATEGORIES_TO_COLORS[category] if
category in feconf.CATEGORIES_TO_COLORS else feconf.DEFAULT_COLOR)
return (
'/images/gallery/exploration_background_%s_large.png' %
info_card_color)
def get_hex_color_for_category(category):
color = (
feconf.CATEGORIES_TO_COLORS[category]
if category in feconf.CATEGORIES_TO_COLORS
else feconf.DEFAULT_COLOR)
return feconf.COLORS_TO_HEX_VALUES[color]
def get_thumbnail_icon_url_for_category(category):
icon_name = (
category if category in feconf.DEFAULT_CATEGORIES
else feconf.DEFAULT_THUMBNAIL_ICON)
# Remove all spaces from the string.
return '/images/gallery/thumbnails/%s.svg' % icon_name.replace(' ', '')
def _get_short_language_description(full_language_description):
"""Given one of the descriptions in feconf.ALL_LANGUAGE_CODES, generates
the corresponding short description.
"""
if ' (' not in full_language_description:
return full_language_description
else:
ind = full_language_description.find(' (')
return full_language_description[:ind]
def get_all_language_codes_and_names():
return [{
'code': lc['code'],
'name': _get_short_language_description(lc['description']),
} for lc in feconf.ALL_LANGUAGE_CODES]
|
{
"content_hash": "cd1855be9471ea598e5dca7db86900ff",
"timestamp": "",
"source": "github",
"line_count": 433,
"max_line_length": 79,
"avg_line_length": 32.79445727482679,
"alnum_prop": 0.6216197183098592,
"repo_name": "kennho/oppia",
"id": "027e7f3481d23a47de98871e85fbc97433c5d230",
"size": "14805",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "utils.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "363"
},
{
"name": "CSS",
"bytes": "59360"
},
{
"name": "HTML",
"bytes": "420850"
},
{
"name": "JavaScript",
"bytes": "2031740"
},
{
"name": "Python",
"bytes": "2178413"
},
{
"name": "Shell",
"bytes": "37757"
}
],
"symlink_target": ""
}
|
"""
This script fixes links that contain common spelling mistakes.
This is only possible on wikis that have a template for these misspellings.
Command line options:
-always:XY instead of asking the user what to do, always perform the same
action. For example, XY can be "r0", "u" or "2". Be careful with
this option, and check the changes made by the bot. Note that
some choices for XY don't make sense and will result in a loop,
e.g. "l" or "m".
-start:XY goes through all misspellings in the category on your wiki
that is defined (to the bot) as the category containing
misspelling pages, starting at XY. If the -start argument is not
given, it starts at the beginning.
-main only check pages in the main namespace, not in the talk,
wikipedia, user, etc. namespaces.
"""
# (C) Daniel Herding, 2007
# (C) Pywikibot team, 2007-2015
#
# Distributed under the terms of the MIT license.
#
from __future__ import unicode_literals
__version__ = '$Id$'
#
import pywikibot
from pywikibot import i18n, pagegenerators
from pywikibot.tools import PY2
from solve_disambiguation import DisambiguationRobot
if not PY2:
basestring = (str, )
HELP_MSG = """\n
mispelling.py does not support site {site}.
Help Pywikibot team to provide support for your wiki by submitting
a bug to:
https://phabricator.wikimedia.org/maniphest/task/create/?projects=pywikibot-core
with category containing misspelling pages or a template for
these misspellings.\n"""
class MisspellingRobot(DisambiguationRobot):
"""Spelling bot."""
misspellingTemplate = {
'de': ('Falschschreibung', 'Obsolete Schreibung'),
}
# Optional: if there is a category, one can use the -start
# parameter.
misspellingCategory = {
'da': u'Omdirigeringer af fejlstavninger', # only contains date redirects at the moment
'de': ('Kategorie:Wikipedia:Falschschreibung',
'Kategorie:Wikipedia:Obsolete Schreibung'),
'en': u'Redirects from misspellings',
'hu': u'Átirányítások hibás névről',
'nl': u'Categorie:Wikipedia:Redirect voor spelfout',
}
def __init__(self, always, firstPageTitle, main_only):
"""Constructor."""
super(MisspellingRobot, self).__init__(
always, [], True, False, None, False, main_only)
self.generator = self.createPageGenerator(firstPageTitle)
def createPageGenerator(self, firstPageTitle):
"""
Generator to retrieve misspelling pages or misspelling redirects.
@rtype: generator
"""
mylang = self.site.code
if mylang in self.misspellingCategory:
categories = self.misspellingCategory[mylang]
if isinstance(categories, basestring):
categories = (categories, )
generators = (
pagegenerators.CategorizedPageGenerator(
pywikibot.Category(self.site, misspellingCategoryTitle),
recurse=True, start=firstPageTitle)
for misspellingCategoryTitle in categories)
elif mylang in self.misspellingTemplate:
templates = self.misspellingTemplate[mylang]
if isinstance(templates, basestring):
templates = (templates, )
generators = (
pagegenerators.ReferringPageGenerator(
pywikibot.Page(self.site, misspellingTemplateName, ns=10),
onlyTemplateInclusion=True)
for misspellingTemplateName in templates)
if firstPageTitle:
pywikibot.output(
u'-start parameter unsupported on this wiki because there '
u'is no category for misspellings.')
else:
pywikibot.output(HELP_MSG.format(site=self.site))
empty_gen = (i for i in [])
return empty_gen
generator = pagegenerators.CombinedPageGenerator(generators)
preloadingGen = pagegenerators.PreloadingGenerator(generator)
return preloadingGen
def findAlternatives(self, disambPage):
"""
Append link target to a list of alternative links.
Overrides the DisambiguationRobot method.
@return: True if alternate link was appended
@rtype: bool or None
"""
if disambPage.isRedirectPage():
self.alternatives.append(disambPage.getRedirectTarget().title())
return True
if self.misspellingTemplate.get(disambPage.site.code) is not None:
for template, params in disambPage.templatesWithParams():
if (template.title(withNamespace=False) ==
self.misspellingTemplate[disambPage.site.code]):
# The correct spelling is in the last paramter.
correctSpelling = params[-1]
# On de.wikipedia, there are some cases where the
# misspelling is ambigous, see for example:
# https://de.wikipedia.org/wiki/Buthan
for match in self.linkR.finditer(correctSpelling):
self.alternatives.append(match.group('title'))
if not self.alternatives:
# There were no links in the parameter, so there is
# only one correct spelling.
self.alternatives.append(correctSpelling)
return True
def setSummaryMessage(self, disambPage, *args, **kwargs):
"""
Setup the summary message.
Overrides the DisambiguationRobot method.
"""
# TODO: setSummaryMessage() in solve_disambiguation now has parameters
# new_targets and unlink. Make use of these here.
self.comment = i18n.twtranslate(self.site, 'misspelling-fixing',
{'page': disambPage.title()})
def main(*args):
"""
Process command line arguments and invoke bot.
If args is an empty list, sys.argv is used.
@param args: command line arguments
@type args: list of unicode
"""
# the option that's always selected when the bot wonders what to do with
# a link. If it's None, the user is prompted (default behaviour).
always = None
main_only = False
firstPageTitle = None
for arg in pywikibot.handle_args(args):
if arg.startswith('-always:'):
always = arg[8:]
elif arg.startswith('-start'):
if len(arg) == 6:
firstPageTitle = pywikibot.input(
u'At which page do you want to start?')
else:
firstPageTitle = arg[7:]
elif arg == '-main':
main_only = True
bot = MisspellingRobot(always, firstPageTitle, main_only)
bot.run()
if __name__ == "__main__":
main()
|
{
"content_hash": "a9e95ed2dd12c0dc7d2eec7068a64603",
"timestamp": "",
"source": "github",
"line_count": 187,
"max_line_length": 96,
"avg_line_length": 37.23529411764706,
"alnum_prop": 0.6146775815022261,
"repo_name": "trishnaguha/pywikibot-core",
"id": "05dc7b0f5f34ef7614f552e07243f8d3a2455917",
"size": "7013",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scripts/misspelling.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "97"
},
{
"name": "Python",
"bytes": "3821251"
}
],
"symlink_target": ""
}
|
import threading
import unittest
import socket
import os
import shutil
import time
import re
import tempfile
import ftplib
import random
import warnings
import sys
import errno
import asyncore
import atexit
import stat
try:
import cStringIO as StringIO
except ImportError:
import StringIO
try:
import ssl
except ImportError:
ssl = None
try:
import sendfile
except ImportError:
sendfile = None
from pyftpdlib import ftpserver
# Attempt to use IP rather than hostname (test suite will run a lot faster)
try:
HOST = socket.gethostbyname('localhost')
except socket.error:
HOST = 'localhost'
USER = 'user'
PASSWD = '12345'
HOME = os.getcwd()
TESTFN = 'tmp-pyftpdlib'
def try_address(host, port=0, family=socket.AF_INET):
"""Try to bind a socket on the given host:port and return True
if that has been possible."""
try:
sock = socket.socket(family)
sock.bind((host, port))
except (socket.error, socket.gaierror):
return False
else:
sock.close()
return True
def support_hybrid_ipv6():
"""Return True if it is possible to use hybrid IPv6/IPv4 sockets
on this platform.
"""
# IPPROTO_IPV6 constant is broken, see: http://bugs.python.org/issue6926
IPPROTO_IPV6 = getattr(socket, "IPV6_V6ONLY", 41)
IPV6_V6ONLY = getattr(socket, "IPV6_V6ONLY", 26)
sock = socket.socket(socket.AF_INET6)
try:
try:
return not sock.getsockopt(IPPROTO_IPV6, IPV6_V6ONLY)
except socket.error:
return False
finally:
sock.close()
SUPPORTS_IPV4 = try_address('127.0.0.1')
SUPPORTS_IPV6 = socket.has_ipv6 and try_address('::1', family=socket.AF_INET6)
SUPPORTS_HYBRID_IPV6 = SUPPORTS_IPV6 and support_hybrid_ipv6()
SUPPORTS_SENDFILE = sendfile is not None
def safe_remove(*files):
"Convenience function for removing temporary test files"
for file in files:
try:
os.remove(file)
except OSError, err:
if err.errno != errno.ENOENT:
raise
def safe_rmdir(dir):
"Convenience function for removing temporary test directories"
try:
os.rmdir(dir)
except OSError, err:
if err.errno != errno.ENOENT:
raise
def touch(name):
"""Create a file and return its name."""
assert not os.path.isfile(name), name
f = open(name, 'w')
try:
return f.name
finally:
f.close()
def onexit():
"""Convenience function for removing temporary files and
directories on interpreter exit.
Also closes all sockets/instances left behind in asyncore
socket map (if any).
"""
for name in os.listdir('.'):
if name.startswith(tempfile.template):
if os.path.isdir(name):
shutil.rmtree(name)
else:
os.remove(name)
map = asyncore.socket_map
for x in map.values():
try:
sys.stderr.write("garbage: %s\n" % repr(x))
x.close()
except:
pass
map.clear()
# commented out as per bug http://bugs.python.org/issue10354
#tempfile.template = 'tmp-pyftpdlib'
atexit.register(onexit)
# lower this threshold so that the scheduler internal queue
# gets re-heapified more often
ftpserver._scheduler.cancellations_threshold = 5
class FTPd(threading.Thread):
"""A threaded FTP server used for running tests.
This is basically a modified version of the FTPServer class which
wraps the polling loop into a thread.
The instance returned can be used to start(), stop() and
eventually re-start() the server.
"""
handler = ftpserver.FTPHandler
def __init__(self, host=HOST, port=0, verbose=False):
threading.Thread.__init__(self)
self.__serving = False
self.__stopped = False
self.__lock = threading.Lock()
self.__flag = threading.Event()
if not verbose:
ftpserver.log = ftpserver.logline = lambda x: x
# this makes the threaded server raise an actual exception
# instead of just logging its traceback
def logerror(msg):
raise
ftpserver.logerror = logerror
authorizer = ftpserver.DummyAuthorizer()
authorizer.add_user(USER, PASSWD, HOME, perm='elradfmwM') # full perms
authorizer.add_anonymous(HOME)
self.handler.authorizer = authorizer
self.server = ftpserver.FTPServer((host, port), self.handler)
self.host, self.port = self.server.socket.getsockname()[:2]
def __repr__(self):
status = [self.__class__.__module__ + "." + self.__class__.__name__]
if self.__serving:
status.append('active')
else:
status.append('inactive')
status.append('%s:%s' % self.server.socket.getsockname()[:2])
return '<%s at %#x>' % (' '.join(status), id(self))
@property
def running(self):
return self.__serving
def start(self, timeout=0.001, use_poll=False):
"""Start serving until an explicit stop() request.
Polls for shutdown every 'timeout' seconds.
"""
if self.__serving:
raise RuntimeError("Server already started")
if self.__stopped:
# ensure the server can be started again
FTPd.__init__(self, self.server.socket.getsockname(), self.handler)
self.__timeout = timeout
self.__use_poll = use_poll
threading.Thread.start(self)
self.__flag.wait()
def run(self):
self.__serving = True
self.__flag.set()
while self.__serving and asyncore.socket_map:
self.__lock.acquire()
self.server.serve_forever(timeout=self.__timeout, count=1,
use_poll=self.__use_poll)
self.__lock.release()
self.server.close_all()
def stop(self):
"""Stop serving (also disconnecting all currently connected
clients) by telling the serve_forever() loop to stop and
waits until it does.
"""
if not self.__serving:
raise RuntimeError("Server not started yet")
self.__serving = False
self.__stopped = True
self.join()
class TestAbstractedFS(unittest.TestCase):
"""Test for conversion utility methods of AbstractedFS class."""
def setUp(self):
safe_remove(TESTFN)
tearDown = setUp
def test_ftpnorm(self):
# Tests for ftpnorm method.
ae = self.assertEquals
fs = ftpserver.AbstractedFS('/', None)
fs._cwd = '/'
ae(fs.ftpnorm(''), '/')
ae(fs.ftpnorm('/'), '/')
ae(fs.ftpnorm('.'), '/')
ae(fs.ftpnorm('..'), '/')
ae(fs.ftpnorm('a'), '/a')
ae(fs.ftpnorm('/a'), '/a')
ae(fs.ftpnorm('/a/'), '/a')
ae(fs.ftpnorm('a/..'), '/')
ae(fs.ftpnorm('a/b'), '/a/b')
ae(fs.ftpnorm('a/b/..'), '/a')
ae(fs.ftpnorm('a/b/../..'), '/')
fs._cwd = '/sub'
ae(fs.ftpnorm(''), '/sub')
ae(fs.ftpnorm('/'), '/')
ae(fs.ftpnorm('.'), '/sub')
ae(fs.ftpnorm('..'), '/')
ae(fs.ftpnorm('a'), '/sub/a')
ae(fs.ftpnorm('a/'), '/sub/a')
ae(fs.ftpnorm('a/..'), '/sub')
ae(fs.ftpnorm('a/b'), '/sub/a/b')
ae(fs.ftpnorm('a/b/'), '/sub/a/b')
ae(fs.ftpnorm('a/b/..'), '/sub/a')
ae(fs.ftpnorm('a/b/../..'), '/sub')
ae(fs.ftpnorm('a/b/../../..'), '/')
ae(fs.ftpnorm('//'), '/') # UNC paths must be collapsed
def test_ftp2fs(self):
# Tests for ftp2fs method.
ae = self.assertEquals
fs = ftpserver.AbstractedFS('/', None)
join = lambda x, y: os.path.join(x, y.replace('/', os.sep))
def goforit(root):
fs._root = root
fs._cwd = '/'
ae(fs.ftp2fs(''), root)
ae(fs.ftp2fs('/'), root)
ae(fs.ftp2fs('.'), root)
ae(fs.ftp2fs('..'), root)
ae(fs.ftp2fs('a'), join(root, 'a'))
ae(fs.ftp2fs('/a'), join(root, 'a'))
ae(fs.ftp2fs('/a/'), join(root, 'a'))
ae(fs.ftp2fs('a/..'), root)
ae(fs.ftp2fs('a/b'), join(root, r'a/b'))
ae(fs.ftp2fs('/a/b'), join(root, r'a/b'))
ae(fs.ftp2fs('/a/b/..'), join(root, 'a'))
ae(fs.ftp2fs('/a/b/../..'), root)
fs._cwd = '/sub'
ae(fs.ftp2fs(''), join(root, 'sub'))
ae(fs.ftp2fs('/'), root)
ae(fs.ftp2fs('.'), join(root, 'sub'))
ae(fs.ftp2fs('..'), root)
ae(fs.ftp2fs('a'), join(root, 'sub/a'))
ae(fs.ftp2fs('a/'), join(root, 'sub/a'))
ae(fs.ftp2fs('a/..'), join(root, 'sub'))
ae(fs.ftp2fs('a/b'), join(root, 'sub/a/b'))
ae(fs.ftp2fs('a/b/..'), join(root, 'sub/a'))
ae(fs.ftp2fs('a/b/../..'), join(root, 'sub'))
ae(fs.ftp2fs('a/b/../../..'), root)
ae(fs.ftp2fs('//a'), join(root, 'a')) # UNC paths must be collapsed
if os.sep == '\\':
goforit(r'C:\dir')
goforit('C:\\')
# on DOS-derived filesystems (e.g. Windows) this is the same
# as specifying the current drive directory (e.g. 'C:\\')
goforit('\\')
elif os.sep == '/':
goforit('/home/user')
goforit('/')
else:
# os.sep == ':'? Don't know... let's try it anyway
goforit(os.getcwd())
def test_fs2ftp(self):
# Tests for fs2ftp method.
ae = self.assertEquals
fs = ftpserver.AbstractedFS('/', None)
join = lambda x, y: os.path.join(x, y.replace('/', os.sep))
def goforit(root):
fs._root = root
ae(fs.fs2ftp(root), '/')
ae(fs.fs2ftp(join(root, '/')), '/')
ae(fs.fs2ftp(join(root, '.')), '/')
ae(fs.fs2ftp(join(root, '..')), '/') # can't escape from root
ae(fs.fs2ftp(join(root, 'a')), '/a')
ae(fs.fs2ftp(join(root, 'a/')), '/a')
ae(fs.fs2ftp(join(root, 'a/..')), '/')
ae(fs.fs2ftp(join(root, 'a/b')), '/a/b')
ae(fs.fs2ftp(join(root, 'a/b')), '/a/b')
ae(fs.fs2ftp(join(root, 'a/b/..')), '/a')
ae(fs.fs2ftp(join(root, '/a/b/../..')), '/')
fs._cwd = '/sub'
ae(fs.fs2ftp(join(root, 'a/')), '/a')
if os.sep == '\\':
goforit(r'C:\dir')
goforit('C:\\')
# on DOS-derived filesystems (e.g. Windows) this is the same
# as specifying the current drive directory (e.g. 'C:\\')
goforit('\\')
fs._root = r'C:\dir'
ae(fs.fs2ftp('C:\\'), '/')
ae(fs.fs2ftp('D:\\'), '/')
ae(fs.fs2ftp('D:\\dir'), '/')
elif os.sep == '/':
goforit('/')
if os.path.realpath('/__home/user') != '/__home/user':
self.fail('Test skipped (symlinks not allowed).')
goforit('/__home/user')
fs._root = '/__home/user'
ae(fs.fs2ftp('/__home'), '/')
ae(fs.fs2ftp('/'), '/')
ae(fs.fs2ftp('/__home/userx'), '/')
else:
# os.sep == ':'? Don't know... let's try it anyway
goforit(os.getcwd())
def test_validpath(self):
# Tests for validpath method.
fs = ftpserver.AbstractedFS('/', None)
fs._root = HOME
self.assertTrue(fs.validpath(HOME))
self.assertTrue(fs.validpath(HOME + '/'))
self.assertFalse(fs.validpath(HOME + 'bar'))
if hasattr(os, 'symlink'):
def test_validpath_validlink(self):
# Test validpath by issuing a symlink pointing to a path
# inside the root directory.
fs = ftpserver.AbstractedFS('/', None)
fs._root = HOME
TESTFN2 = TESTFN + '1'
try:
touch(TESTFN)
os.symlink(TESTFN, TESTFN2)
self.assertTrue(fs.validpath(TESTFN))
finally:
safe_remove(TESTFN, TESTFN2)
def test_validpath_external_symlink(self):
# Test validpath by issuing a symlink pointing to a path
# outside the root directory.
fs = ftpserver.AbstractedFS('/', None)
fs._root = HOME
# tempfile should create our file in /tmp directory
# which should be outside the user root. If it is
# not we just skip the test.
file = tempfile.NamedTemporaryFile()
try:
if HOME == os.path.dirname(file.name):
return
os.symlink(file.name, TESTFN)
self.assertFalse(fs.validpath(TESTFN))
finally:
safe_remove(TESTFN)
file.close()
class TestDummyAuthorizer(unittest.TestCase):
"""Tests for DummyAuthorizer class."""
# temporarily change warnings to exceptions for the purposes of testing
def setUp(self):
self.tempdir = tempfile.mkdtemp(dir=HOME)
self.subtempdir = tempfile.mkdtemp(dir=os.path.join(HOME, self.tempdir))
self.tempfile = touch(os.path.join(self.tempdir, TESTFN))
self.subtempfile = touch(os.path.join(self.subtempdir, TESTFN))
warnings.filterwarnings("error")
def tearDown(self):
os.remove(self.tempfile)
os.remove(self.subtempfile)
os.rmdir(self.subtempdir)
os.rmdir(self.tempdir)
warnings.resetwarnings()
def assertRaisesWithMsg(self, excClass, msg, callableObj, *args, **kwargs):
try:
callableObj(*args, **kwargs)
except excClass, why:
if str(why) == msg:
return
raise self.failureException("%s != %s" % (str(why), msg))
else:
if hasattr(excClass,'__name__'): excName = excClass.__name__
else: excName = str(excClass)
raise self.failureException, "%s not raised" % excName
def test_common_methods(self):
auth = ftpserver.DummyAuthorizer()
# create user
auth.add_user(USER, PASSWD, HOME)
auth.add_anonymous(HOME)
# check credentials
self.assertTrue(auth.validate_authentication(USER, PASSWD))
self.assertFalse(auth.validate_authentication(USER, 'wrongpwd'))
# remove them
auth.remove_user(USER)
auth.remove_user('anonymous')
# raise exc if user does not exists
self.assertRaises(KeyError, auth.remove_user, USER)
# raise exc if path does not exist
self.assertRaisesWithMsg(ValueError,
'no such directory: "%s"' % '?:\\',
auth.add_user, USER, PASSWD, '?:\\')
self.assertRaisesWithMsg(ValueError,
'no such directory: "%s"' % '?:\\',
auth.add_anonymous, '?:\\')
# raise exc if user already exists
auth.add_user(USER, PASSWD, HOME)
auth.add_anonymous(HOME)
self.assertRaisesWithMsg(ValueError,
'user "%s" already exists' % USER,
auth.add_user, USER, PASSWD, HOME)
self.assertRaisesWithMsg(ValueError,
'user "anonymous" already exists',
auth.add_anonymous, HOME)
auth.remove_user(USER)
auth.remove_user('anonymous')
# raise on wrong permission
self.assertRaisesWithMsg(ValueError,
'no such permission "?"',
auth.add_user, USER, PASSWD, HOME, perm='?')
self.assertRaisesWithMsg(ValueError,
'no such permission "?"',
auth.add_anonymous, HOME, perm='?')
# expect warning on write permissions assigned to anonymous user
for x in "adfmw":
self.assertRaisesWithMsg(RuntimeWarning,
"write permissions assigned to anonymous user.",
auth.add_anonymous, HOME, perm=x)
def test_override_perm_interface(self):
auth = ftpserver.DummyAuthorizer()
auth.add_user(USER, PASSWD, HOME, perm='elr')
# raise exc if user does not exists
self.assertRaises(KeyError, auth.override_perm, USER+'w', HOME, 'elr')
# raise exc if path does not exist or it's not a directory
self.assertRaisesWithMsg(ValueError,
'no such directory: "%s"' % '?:\\',
auth.override_perm, USER, '?:\\', 'elr')
self.assertRaisesWithMsg(ValueError,
'no such directory: "%s"' % self.tempfile,
auth.override_perm, USER, self.tempfile, 'elr')
# raise on wrong permission
self.assertRaisesWithMsg(ValueError,
'no such permission "?"', auth.override_perm,
USER, HOME, perm='?')
# expect warning on write permissions assigned to anonymous user
auth.add_anonymous(HOME)
for p in "adfmw":
self.assertRaisesWithMsg(RuntimeWarning,
"write permissions assigned to anonymous user.",
auth.override_perm, 'anonymous', HOME, p)
# raise on attempt to override home directory permissions
self.assertRaisesWithMsg(ValueError,
"can't override home directory permissions",
auth.override_perm, USER, HOME, perm='w')
# raise on attempt to override a path escaping home directory
if os.path.dirname(HOME) != HOME:
self.assertRaisesWithMsg(ValueError,
"path escapes user home directory",
auth.override_perm, USER,
os.path.dirname(HOME), perm='w')
# try to re-set an overridden permission
auth.override_perm(USER, self.tempdir, perm='w')
auth.override_perm(USER, self.tempdir, perm='wr')
def test_override_perm_recursive_paths(self):
auth = ftpserver.DummyAuthorizer()
auth.add_user(USER, PASSWD, HOME, perm='elr')
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir), False)
auth.override_perm(USER, self.tempdir, perm='w', recursive=True)
self.assertEqual(auth.has_perm(USER, 'w', HOME), False)
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir), True)
self.assertEqual(auth.has_perm(USER, 'w', self.tempfile), True)
self.assertEqual(auth.has_perm(USER, 'w', self.subtempdir), True)
self.assertEqual(auth.has_perm(USER, 'w', self.subtempfile), True)
self.assertEqual(auth.has_perm(USER, 'w', HOME + '@'), False)
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir + '@'), False)
path = os.path.join(self.tempdir + '@', os.path.basename(self.tempfile))
self.assertEqual(auth.has_perm(USER, 'w', path), False)
# test case-sensitiveness
if (os.name in ('nt', 'ce')) or (sys.platform == 'cygwin'):
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir.upper()), True)
def test_override_perm_not_recursive_paths(self):
auth = ftpserver.DummyAuthorizer()
auth.add_user(USER, PASSWD, HOME, perm='elr')
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir), False)
auth.override_perm(USER, self.tempdir, perm='w')
self.assertEqual(auth.has_perm(USER, 'w', HOME), False)
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir), True)
self.assertEqual(auth.has_perm(USER, 'w', self.tempfile), True)
self.assertEqual(auth.has_perm(USER, 'w', self.subtempdir), False)
self.assertEqual(auth.has_perm(USER, 'w', self.subtempfile), False)
self.assertEqual(auth.has_perm(USER, 'w', HOME + '@'), False)
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir + '@'), False)
path = os.path.join(self.tempdir + '@', os.path.basename(self.tempfile))
self.assertEqual(auth.has_perm(USER, 'w', path), False)
# test case-sensitiveness
if (os.name in ('nt', 'ce')) or (sys.platform == 'cygwin'):
self.assertEqual(auth.has_perm(USER, 'w', self.tempdir.upper()), True)
class TestCallLater(unittest.TestCase):
"""Tests for CallLater class."""
def setUp(self):
for task in ftpserver._scheduler._tasks:
if not task.cancelled:
task.cancel()
del ftpserver._scheduler._tasks[:]
def scheduler(self, timeout=0.01, count=100):
while ftpserver._scheduler._tasks and count > 0:
ftpserver._scheduler()
count -= 1
time.sleep(timeout)
def test_interface(self):
fun = lambda: 0
self.assertRaises(AssertionError, ftpserver.CallLater, -1, fun)
x = ftpserver.CallLater(3, fun)
self.assertRaises(AssertionError, x.delay, -1)
self.assertEqual(x.cancelled, False)
x.cancel()
self.assertEqual(x.cancelled, True)
self.assertRaises(AssertionError, x.call)
self.assertRaises(AssertionError, x.reset)
self.assertRaises(AssertionError, x.delay, 2)
self.assertRaises(AssertionError, x.cancel)
def test_order(self):
l = []
fun = lambda x: l.append(x)
for x in [0.05, 0.04, 0.03, 0.02, 0.01]:
ftpserver.CallLater(x, fun, x)
self.scheduler()
self.assertEqual(l, [0.01, 0.02, 0.03, 0.04, 0.05])
def test_delay(self):
l = []
fun = lambda x: l.append(x)
ftpserver.CallLater(0.01, fun, 0.01).delay(0.07)
ftpserver.CallLater(0.02, fun, 0.02).delay(0.08)
ftpserver.CallLater(0.03, fun, 0.03)
ftpserver.CallLater(0.04, fun, 0.04)
ftpserver.CallLater(0.05, fun, 0.05)
ftpserver.CallLater(0.06, fun, 0.06).delay(0.001)
self.scheduler()
self.assertEqual(l, [0.06, 0.03, 0.04, 0.05, 0.01, 0.02])
# The test is reliable only on those systems where time.time()
# provides time with a better precision than 1 second.
if not str(time.time()).endswith('.0'):
def test_reset(self):
l = []
fun = lambda x: l.append(x)
ftpserver.CallLater(0.01, fun, 0.01)
ftpserver.CallLater(0.02, fun, 0.02)
ftpserver.CallLater(0.03, fun, 0.03)
x = ftpserver.CallLater(0.04, fun, 0.04)
ftpserver.CallLater(0.05, fun, 0.05)
time.sleep(0.1)
x.reset()
self.scheduler()
self.assertEqual(l, [0.01, 0.02, 0.03, 0.05, 0.04])
def test_cancel(self):
l = []
fun = lambda x: l.append(x)
ftpserver.CallLater(0.01, fun, 0.01).cancel()
ftpserver.CallLater(0.02, fun, 0.02)
ftpserver.CallLater(0.03, fun, 0.03)
ftpserver.CallLater(0.04, fun, 0.04)
ftpserver.CallLater(0.05, fun, 0.05).cancel()
self.scheduler()
self.assertEqual(l, [0.02, 0.03, 0.04])
def test_errback(self):
l = []
ftpserver.CallLater(0.0, lambda: 1//0, _errback=lambda: l.append(True))
self.scheduler()
self.assertEqual(l, [True])
class TestCallEvery(unittest.TestCase):
"""Tests for CallEvery class."""
def setUp(self):
for task in ftpserver._scheduler._tasks:
if not task.cancelled:
task.cancel()
del ftpserver._scheduler._tasks[:]
def scheduler(self, timeout=0.0001):
for x in range(100):
ftpserver._scheduler()
time.sleep(timeout)
def test_interface(self):
fun = lambda: 0
self.assertRaises(AssertionError, ftpserver.CallEvery, -1, fun)
x = ftpserver.CallEvery(3, fun)
self.assertRaises(AssertionError, x.delay, -1)
self.assertEqual(x.cancelled, False)
x.cancel()
self.assertEqual(x.cancelled, True)
self.assertRaises(AssertionError, x.call)
self.assertRaises(AssertionError, x.reset)
self.assertRaises(AssertionError, x.delay, 2)
self.assertRaises(AssertionError, x.cancel)
def test_only_once(self):
# make sure that callback is called only once per-loop
l1 = []
fun = lambda: l1.append(None)
ftpserver.CallEvery(0, fun)
ftpserver._scheduler()
self.assertEqual(l1, [None])
def test_multi_0_timeout(self):
# make sure a 0 timeout callback is called as many times
# as the number of loops
l = []
fun = lambda: l.append(None)
ftpserver.CallEvery(0, fun)
self.scheduler()
self.assertEqual(len(l), 100)
# run it on systems where time.time() has a higher precision
if os.name == 'posix':
def test_low_and_high_timeouts(self):
# make sure a callback with a lower timeout is called more
# frequently than another with a greater timeout
l1 = []
fun = lambda: l1.append(None)
ftpserver.CallEvery(0.001, fun)
self.scheduler()
l2 = []
fun = lambda: l2.append(None)
ftpserver.CallEvery(0.01, fun)
self.scheduler()
self.assertTrue(len(l1) > len(l2))
def test_cancel(self):
# make sure a cancelled callback doesn't get called anymore
l = []
fun = lambda: l.append(None)
call = ftpserver.CallEvery(0.001, fun)
self.scheduler()
len_l = len(l)
call.cancel()
self.scheduler()
self.assertEqual(len_l, len(l))
def test_errback(self):
l = []
ftpserver.CallEvery(0.0, lambda: 1//0, _errback=lambda: l.append(True))
self.scheduler()
self.assertTrue(l)
class TestFtpAuthentication(unittest.TestCase):
"test: USER, PASS, REIN."
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
self.server = self.server_class()
self.server.handler._auth_failed_timeout = 0
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.file = open(TESTFN, 'w+b')
self.dummyfile = StringIO.StringIO()
def tearDown(self):
self.server.handler._auth_failed_timeout = 5
self.client.close()
self.server.stop()
if not self.file.closed:
self.file.close()
if not self.dummyfile.closed:
self.dummyfile.close()
os.remove(TESTFN)
def test_auth_ok(self):
self.client.login(user=USER, passwd=PASSWD)
def test_anon_auth(self):
self.client.login(user='anonymous', passwd='anon@')
self.client.login(user='anonymous', passwd='')
self.assertRaises(ftplib.error_perm, self.client.login, 'AnoNymouS')
def test_auth_failed(self):
self.assertRaises(ftplib.error_perm, self.client.login, USER, 'wrong')
self.assertRaises(ftplib.error_perm, self.client.login, 'wrong', PASSWD)
self.assertRaises(ftplib.error_perm, self.client.login, 'wrong', 'wrong')
def test_wrong_cmds_order(self):
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'pass ' + PASSWD)
self.client.login(user=USER, passwd=PASSWD)
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'pass ' + PASSWD)
def test_max_auth(self):
self.assertRaises(ftplib.error_perm, self.client.login, USER, 'wrong')
self.assertRaises(ftplib.error_perm, self.client.login, USER, 'wrong')
self.assertRaises(ftplib.error_perm, self.client.login, USER, 'wrong')
# If authentication fails for 3 times ftpd disconnects the
# client. We can check if that happens by using self.client.sendcmd()
# on the 'dead' socket object. If socket object is really
# closed it should be raised a socket.error exception (Windows)
# or a EOFError exception (Linux).
self.assertRaises((socket.error, EOFError), self.client.sendcmd, '')
def test_rein(self):
self.client.login(user=USER, passwd=PASSWD)
self.client.sendcmd('rein')
# user not authenticated, error response expected
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'pwd')
# by logging-in again we should be able to execute a
# file-system command
self.client.login(user=USER, passwd=PASSWD)
self.client.sendcmd('pwd')
def test_rein_during_transfer(self):
# Test REIN while already authenticated and a transfer is
# in progress.
self.client.login(user=USER, passwd=PASSWD)
data = 'abcde12345' * 1000000
self.file.write(data)
self.file.close()
conn = self.client.transfercmd('retr ' + TESTFN)
rein_sent = False
bytes_recv = 0
while 1:
chunk = conn.recv(8192)
if not chunk:
break
bytes_recv += len(chunk)
self.dummyfile.write(chunk)
if bytes_recv > 65536 and not rein_sent:
rein_sent = True
# flush account, error response expected
self.client.sendcmd('rein')
self.assertRaises(ftplib.error_perm, self.client.dir)
# a 226 response is expected once tranfer finishes
self.assertEqual(self.client.voidresp()[:3], '226')
# account is still flushed, error response is still expected
self.assertRaises(ftplib.error_perm, self.client.sendcmd,
'size ' + TESTFN)
# by logging-in again we should be able to execute a
# filesystem command
self.client.login(user=USER, passwd=PASSWD)
self.client.sendcmd('pwd')
self.dummyfile.seek(0)
self.assertEqual(hash(data), hash (self.dummyfile.read()))
def test_user(self):
# Test USER while already authenticated and no transfer
# is in progress.
self.client.login(user=USER, passwd=PASSWD)
self.client.sendcmd('user ' + USER) # authentication flushed
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'pwd')
self.client.sendcmd('pass ' + PASSWD)
self.client.sendcmd('pwd')
def test_user_during_transfer(self):
# Test USER while already authenticated and a transfer is
# in progress.
self.client.login(user=USER, passwd=PASSWD)
data = 'abcde12345' * 1000000
self.file.write(data)
self.file.close()
conn = self.client.transfercmd('retr ' + TESTFN)
rein_sent = 0
bytes_recv = 0
while 1:
chunk = conn.recv(8192)
if not chunk:
break
bytes_recv += len(chunk)
self.dummyfile.write(chunk)
# stop transfer while it isn't finished yet
if bytes_recv > 65536 and not rein_sent:
rein_sent = True
# flush account, expect an error response
self.client.sendcmd('user ' + USER)
self.assertRaises(ftplib.error_perm, self.client.dir)
# a 226 response is expected once transfer finishes
self.assertEqual(self.client.voidresp()[:3], '226')
# account is still flushed, error response is still expected
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'pwd')
# by logging-in again we should be able to execute a
# filesystem command
self.client.sendcmd('pass ' + PASSWD)
self.client.sendcmd('pwd')
self.dummyfile.seek(0)
self.assertEqual(hash(data), hash (self.dummyfile.read()))
class TestFtpDummyCmds(unittest.TestCase):
"test: TYPE, STRU, MODE, NOOP, SYST, ALLO, HELP, SITE HELP"
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
self.server = self.server_class()
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
def tearDown(self):
self.client.close()
self.server.stop()
def test_type(self):
self.client.sendcmd('type a')
self.client.sendcmd('type i')
self.client.sendcmd('type l7')
self.client.sendcmd('type l8')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'type ?!?')
def test_stru(self):
self.client.sendcmd('stru f')
self.client.sendcmd('stru F')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'stru p')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'stru r')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'stru ?!?')
def test_mode(self):
self.client.sendcmd('mode s')
self.client.sendcmd('mode S')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'mode b')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'mode c')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'mode ?!?')
def test_noop(self):
self.client.sendcmd('noop')
def test_syst(self):
self.client.sendcmd('syst')
def test_allo(self):
self.client.sendcmd('allo x')
def test_quit(self):
self.client.sendcmd('quit')
def test_help(self):
self.client.sendcmd('help')
cmd = random.choice(ftpserver.proto_cmds.keys())
self.client.sendcmd('help %s' % cmd)
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'help ?!?')
def test_site(self):
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'site')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'site ?!?')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'site foo bar')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'sitefoo bar')
def test_site_help(self):
self.client.sendcmd('site help')
self.client.sendcmd('site help help')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'site help ?!?')
def test_rest(self):
# Test error conditions only; resumed data transfers are
# tested later.
self.client.sendcmd('type i')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'rest')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'rest str')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'rest -1')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'rest 10.1')
# REST is not supposed to be allowed in ASCII mode
self.client.sendcmd('type a')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'rest 10')
def test_opts_feat(self):
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'opts mlst bad_fact')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'opts mlst type ;')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'opts not_mlst')
# utility function which used for extracting the MLST "facts"
# string from the FEAT response
def mlst():
resp = self.client.sendcmd('feat')
return re.search(r'^\s*MLST\s+(\S+)$', resp, re.MULTILINE).group(1)
# we rely on "type", "perm", "size", and "modify" facts which
# are those available on all platforms
self.assertTrue('type*;perm*;size*;modify*;' in mlst())
self.assertEqual(self.client.sendcmd('opts mlst type;'), '200 MLST OPTS type;')
self.assertEqual(self.client.sendcmd('opts mLSt TypE;'), '200 MLST OPTS type;')
self.assertTrue('type*;perm;size;modify;' in mlst())
self.assertEqual(self.client.sendcmd('opts mlst'), '200 MLST OPTS ')
self.assertTrue(not '*' in mlst())
self.assertEqual(self.client.sendcmd('opts mlst fish;cakes;'), '200 MLST OPTS ')
self.assertTrue(not '*' in mlst())
self.assertEqual(self.client.sendcmd('opts mlst fish;cakes;type;'),
'200 MLST OPTS type;')
self.assertTrue('type*;perm;size;modify;' in mlst())
class TestFtpCmdsSemantic(unittest.TestCase):
server_class = FTPd
client_class = ftplib.FTP
arg_cmds = ['allo','appe','dele','eprt','mdtm','mode','mkd','opts','port',
'rest','retr','rmd','rnfr','rnto','site','size','stor','stru',
'type','user','xmkd','xrmd','site chmod']
def setUp(self):
self.server = self.server_class()
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
def tearDown(self):
self.client.close()
self.server.stop()
def test_arg_cmds(self):
# Test commands requiring an argument.
expected = "501 Syntax error: command needs an argument."
for cmd in self.arg_cmds:
self.client.putcmd(cmd)
resp = self.client.getmultiline()
self.assertEqual(resp, expected)
def test_no_arg_cmds(self):
# Test commands accepting no arguments.
expected = "501 Syntax error: command does not accept arguments."
for cmd in ('abor','cdup','feat','noop','pasv','pwd','quit','rein',
'syst','xcup','xpwd'):
self.client.putcmd(cmd + ' arg')
resp = self.client.getmultiline()
self.assertEqual(resp, expected)
def test_auth_cmds(self):
# Test those commands requiring client to be authenticated.
expected = "530 Log in with USER and PASS first."
self.client.sendcmd('rein')
for cmd in self.server.handler.proto_cmds:
cmd = cmd.lower()
if cmd in ('feat','help','noop','user','pass','stat','syst','quit',
'site', 'site help', 'pbsz', 'auth', 'prot', 'ccc'):
continue
if cmd in self.arg_cmds:
cmd = cmd + ' arg'
self.client.putcmd(cmd)
resp = self.client.getmultiline()
self.assertEqual(resp, expected)
def test_no_auth_cmds(self):
# Test those commands that do not require client to be authenticated.
self.client.sendcmd('rein')
for cmd in ('feat','help','noop','stat','syst','site help'):
self.client.sendcmd(cmd)
# STAT provided with an argument is equal to LIST hence not allowed
# if not authenticated
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'stat /')
self.client.sendcmd('quit')
class TestFtpFsOperations(unittest.TestCase):
"test: PWD, CWD, CDUP, SIZE, RNFR, RNTO, DELE, MKD, RMD, MDTM, STAT"
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
self.server = self.server_class()
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
self.tempfile = os.path.basename(touch(TESTFN))
self.tempdir = os.path.basename(tempfile.mkdtemp(dir=HOME))
def tearDown(self):
self.client.close()
self.server.stop()
safe_remove(self.tempfile)
if os.path.exists(self.tempdir):
shutil.rmtree(self.tempdir)
def test_cwd(self):
self.client.cwd(self.tempdir)
self.assertEqual(self.client.pwd(), '/' + self.tempdir)
self.assertRaises(ftplib.error_perm, self.client.cwd, 'subtempdir')
# cwd provided with no arguments is supposed to move us to the
# root directory
self.client.sendcmd('cwd')
self.assertEqual(self.client.pwd(), '/')
def test_pwd(self):
self.assertEqual(self.client.pwd(), '/')
self.client.cwd(self.tempdir)
self.assertEqual(self.client.pwd(), '/' + self.tempdir)
def test_cdup(self):
subfolder = os.path.basename(tempfile.mkdtemp(dir=self.tempdir))
self.assertEqual(self.client.pwd(), '/')
self.client.cwd(self.tempdir)
self.assertEqual(self.client.pwd(), '/%s' % self.tempdir)
self.client.cwd(subfolder)
self.assertEqual(self.client.pwd(), '/%s/%s' % (self.tempdir, subfolder))
self.client.sendcmd('cdup')
self.assertEqual(self.client.pwd(), '/%s' % self.tempdir)
self.client.sendcmd('cdup')
self.assertEqual(self.client.pwd(), '/')
# make sure we can't escape from root directory
self.client.sendcmd('cdup')
self.assertEqual(self.client.pwd(), '/')
def test_mkd(self):
tempdir = os.path.basename(tempfile.mktemp(dir=HOME))
dirname = self.client.mkd(tempdir)
# the 257 response is supposed to include the absolute dirname
self.assertEqual(dirname, '/' + tempdir)
# make sure we can't create directories which already exist
# (probably not really necessary);
# let's use a try/except statement to avoid leaving behind
# orphaned temporary directory in the event of a test failure.
try:
self.client.mkd(tempdir)
except ftplib.error_perm:
os.rmdir(tempdir) # ok
else:
self.fail('ftplib.error_perm not raised.')
def test_rmd(self):
self.client.rmd(self.tempdir)
self.assertRaises(ftplib.error_perm, self.client.rmd, self.tempfile)
# make sure we can't remove the root directory
self.assertRaises(ftplib.error_perm, self.client.rmd, '/')
def test_dele(self):
self.client.delete(self.tempfile)
self.assertRaises(ftplib.error_perm, self.client.delete, self.tempdir)
def test_rnfr_rnto(self):
# rename file
tempname = os.path.basename(tempfile.mktemp(dir=HOME))
self.client.rename(self.tempfile, tempname)
self.client.rename(tempname, self.tempfile)
# rename dir
tempname = os.path.basename(tempfile.mktemp(dir=HOME))
self.client.rename(self.tempdir, tempname)
self.client.rename(tempname, self.tempdir)
# rnfr/rnto over non-existing paths
bogus = os.path.basename(tempfile.mktemp(dir=HOME))
self.assertRaises(ftplib.error_perm, self.client.rename, bogus, '/x')
self.assertRaises(ftplib.error_perm, self.client.rename, self.tempfile, '/')
# rnto sent without first specifying the source
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'rnto ' + self.tempfile)
# make sure we can't rename root directory
self.assertRaises(ftplib.error_perm, self.client.rename, '/', '/x')
def test_mdtm(self):
self.client.sendcmd('mdtm ' + self.tempfile)
bogus = os.path.basename(tempfile.mktemp(dir=HOME))
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'mdtm ' + bogus)
# make sure we can't use mdtm against directories
try:
self.client.sendcmd('mdtm ' + self.tempdir)
except ftplib.error_perm, err:
self.assertTrue("not retrievable" in str(err))
else:
self.fail('Exception not raised')
def test_unforeseen_mdtm_event(self):
# Emulate a case where the file last modification time is prior
# to year 1900. This most likely will never happen unless
# someone specifically force the last modification time of a
# file in some way.
# To do so we temporarily override os.path.getmtime so that it
# returns a negative value referring to a year prior to 1900.
# It causes time.localtime/gmtime to raise a ValueError exception
# which is supposed to be handled by server.
_getmtime = ftpserver.AbstractedFS.getmtime
try:
ftpserver.AbstractedFS.getmtime = lambda x, y: -9000000000
self.assertRaises(ftplib.error_perm, self.client.sendcmd,
'mdtm ' + self.tempfile)
# make sure client hasn't been disconnected
self.client.sendcmd('noop')
finally:
ftpserver.AbstractedFS.getmtime = _getmtime
def test_size(self):
self.client.sendcmd('type a')
self.assertRaises(ftplib.error_perm, self.client.size, self.tempfile)
self.client.sendcmd('type i')
self.client.size(self.tempfile)
# make sure we can't use size against directories
try:
self.client.sendcmd('size ' + self.tempdir)
except ftplib.error_perm, err:
self.assertTrue("not retrievable" in str(err))
else:
self.fail('Exception not raised')
if not hasattr(os, 'chmod'):
def test_site_chmod(self):
self.assertRaises(ftplib.error_perm, self.client.sendcmd,
'site chmod 777 ' + self.tempfile)
else:
def test_site_chmod(self):
# not enough args
self.assertRaises(ftplib.error_perm,
self.client.sendcmd, 'site chmod 777')
# bad args
self.assertRaises(ftplib.error_perm, self.client.sendcmd,
'site chmod -177 ' + self.tempfile)
self.assertRaises(ftplib.error_perm, self.client.sendcmd,
'site chmod 778 ' + self.tempfile)
self.assertRaises(ftplib.error_perm, self.client.sendcmd,
'site chmod foo ' + self.tempfile)
# on Windows it is possible to set read-only flag only
if os.name == 'nt':
self.client.sendcmd('site chmod 777 ' + self.tempfile)
mode = oct(stat.S_IMODE(os.stat(self.tempfile).st_mode))
self.assertEqual(mode, '0666')
self.client.sendcmd('site chmod 444 ' + self.tempfile)
mode = oct(stat.S_IMODE(os.stat(self.tempfile).st_mode))
self.assertEqual(mode, '0444')
self.client.sendcmd('site chmod 666 ' + self.tempfile)
mode = oct(stat.S_IMODE(os.stat(self.tempfile).st_mode))
self.assertEqual(mode, '0666')
else:
self.client.sendcmd('site chmod 777 ' + self.tempfile)
mode = oct(stat.S_IMODE(os.stat(self.tempfile).st_mode))
self.assertEqual(mode, '0777')
self.client.sendcmd('site chmod 755 ' + self.tempfile)
mode = oct(stat.S_IMODE(os.stat(self.tempfile).st_mode))
self.assertEqual(mode, '0755')
self.client.sendcmd('site chmod 555 ' + self.tempfile)
mode = oct(stat.S_IMODE(os.stat(self.tempfile).st_mode))
self.assertEqual(mode, '0555')
class TestFtpStoreData(unittest.TestCase):
"""Test STOR, STOU, APPE, REST, TYPE."""
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
self.server = self.server_class()
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
self.dummy_recvfile = StringIO.StringIO()
self.dummy_sendfile = StringIO.StringIO()
def tearDown(self):
self.client.close()
self.server.stop()
self.dummy_recvfile.close()
self.dummy_sendfile.close()
safe_remove(TESTFN)
def test_stor(self):
try:
data = 'abcde12345' * 100000
self.dummy_sendfile.write(data)
self.dummy_sendfile.seek(0)
self.client.storbinary('stor ' + TESTFN, self.dummy_sendfile)
self.client.retrbinary('retr ' + TESTFN, self.dummy_recvfile.write)
self.dummy_recvfile.seek(0)
self.assertEqual(hash(data), hash (self.dummy_recvfile.read()))
finally:
# We do not use os.remove() because file could still be
# locked by ftpd thread. If DELE through FTP fails try
# os.remove() as last resort.
if os.path.exists(TESTFN):
try:
self.client.delete(TESTFN)
except (ftplib.Error, EOFError, socket.error):
safe_remove(TESTFN)
def test_stor_active(self):
# Like test_stor but using PORT
self.client.set_pasv(False)
self.test_stor()
def test_stor_ascii(self):
# Test STOR in ASCII mode
def store(cmd, fp, blocksize=8192):
# like storbinary() except it sends "type a" instead of
# "type i" before starting the transfer
self.client.voidcmd('type a')
conn = self.client.transfercmd(cmd)
while 1:
buf = fp.read(blocksize)
if not buf:
break
conn.sendall(buf)
conn.close()
return self.client.voidresp()
try:
data = 'abcde12345\r\n' * 100000
self.dummy_sendfile.write(data)
self.dummy_sendfile.seek(0)
store('stor ' + TESTFN, self.dummy_sendfile)
self.client.retrbinary('retr ' + TESTFN, self.dummy_recvfile.write)
expected = data.replace('\r\n', os.linesep)
self.dummy_recvfile.seek(0)
self.assertEqual(hash(expected), hash(self.dummy_recvfile.read()))
finally:
# We do not use os.remove() because file could still be
# locked by ftpd thread. If DELE through FTP fails try
# os.remove() as last resort.
if os.path.exists(TESTFN):
try:
self.client.delete(TESTFN)
except (ftplib.Error, EOFError, socket.error):
safe_remove(TESTFN)
def test_stor_ascii_2(self):
# Test that no extra extra carriage returns are added to the
# file in ASCII mode in case CRLF gets truncated in two chunks
# (issue 116)
def store(cmd, fp, blocksize=8192):
# like storbinary() except it sends "type a" instead of
# "type i" before starting the transfer
self.client.voidcmd('type a')
conn = self.client.transfercmd(cmd)
while 1:
buf = fp.read(blocksize)
if not buf:
break
conn.sendall(buf)
conn.close()
return self.client.voidresp()
old_buffer = ftpserver.DTPHandler.ac_in_buffer_size
try:
# set a small buffer so that CRLF gets delivered in two
# separate chunks: "CRLF", " f", "oo", " CR", "LF", " b", "ar"
ftpserver.DTPHandler.ac_in_buffer_size = 2
data = '\r\n foo \r\n bar'
self.dummy_sendfile.write(data)
self.dummy_sendfile.seek(0)
store('stor ' + TESTFN, self.dummy_sendfile)
expected = data.replace('\r\n', os.linesep)
self.client.retrbinary('retr ' + TESTFN, self.dummy_recvfile.write)
self.dummy_recvfile.seek(0)
self.assertEqual(expected, self.dummy_recvfile.read())
finally:
ftpserver.DTPHandler.ac_in_buffer_size = old_buffer
# We do not use os.remove() because file could still be
# locked by ftpd thread. If DELE through FTP fails try
# os.remove() as last resort.
if os.path.exists(TESTFN):
try:
self.client.delete(TESTFN)
except (ftplib.Error, EOFError, socket.error):
safe_remove(TESTFN)
def test_stou(self):
data = 'abcde12345' * 100000
self.dummy_sendfile.write(data)
self.dummy_sendfile.seek(0)
self.client.voidcmd('TYPE I')
# filename comes in as "1xx FILE: <filename>"
filename = self.client.sendcmd('stou').split('FILE: ')[1]
try:
sock = self.client.makeport()
conn, sockaddr = sock.accept()
if hasattr(self.client_class, 'ssl_version'):
conn = ssl.wrap_socket(conn)
while 1:
buf = self.dummy_sendfile.read(8192)
if not buf:
break
conn.sendall(buf)
sock.close()
conn.close()
# transfer finished, a 226 response is expected
self.assertEqual('226', self.client.voidresp()[:3])
self.client.retrbinary('retr ' + filename, self.dummy_recvfile.write)
self.dummy_recvfile.seek(0)
self.assertEqual(hash(data), hash (self.dummy_recvfile.read()))
finally:
# We do not use os.remove() because file could still be
# locked by ftpd thread. If DELE through FTP fails try
# os.remove() as last resort.
if os.path.exists(filename):
try:
self.client.delete(filename)
except (ftplib.Error, EOFError, socket.error):
safe_remove(filename)
def test_stou_rest(self):
# Watch for STOU preceded by REST, which makes no sense.
self.client.sendcmd('type i')
self.client.sendcmd('rest 10')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'stou')
def test_stou_orphaned_file(self):
# Check that no orphaned file gets left behind when STOU fails.
# Even if STOU fails the file is first created and then erased.
# Since we can't know the name of the file the best way that
# we have to test this case is comparing the content of the
# directory before and after STOU has been issued.
# Assuming that TESTFN is supposed to be a "reserved" file
# name we shouldn't get false positives.
safe_remove(TESTFN)
# login as a limited user to let STOU fail
self.client.login('anonymous', '@nopasswd')
before = os.listdir(HOME)
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'stou ' + TESTFN)
after = os.listdir(HOME)
if before != after:
for file in after:
self.assert_(not file.startswith(TESTFN))
def test_appe(self):
try:
data1 = 'abcde12345' * 100000
self.dummy_sendfile.write(data1)
self.dummy_sendfile.seek(0)
self.client.storbinary('stor ' + TESTFN, self.dummy_sendfile)
data2 = 'fghil67890' * 100000
self.dummy_sendfile.write(data2)
self.dummy_sendfile.seek(len(data1))
self.client.storbinary('appe ' + TESTFN, self.dummy_sendfile)
self.client.retrbinary("retr " + TESTFN, self.dummy_recvfile.write)
self.dummy_recvfile.seek(0)
self.assertEqual(hash(data1 + data2), hash (self.dummy_recvfile.read()))
finally:
# We do not use os.remove() because file could still be
# locked by ftpd thread. If DELE through FTP fails try
# os.remove() as last resort.
if os.path.exists(TESTFN):
try:
self.client.delete(TESTFN)
except (ftplib.Error, EOFError, socket.error):
safe_remove(TESTFN)
def test_appe_rest(self):
# Watch for APPE preceded by REST, which makes no sense.
self.client.sendcmd('type i')
self.client.sendcmd('rest 10')
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'appe x')
def test_rest_on_stor(self):
# Test STOR preceded by REST.
data = 'abcde12345' * 100000
self.dummy_sendfile.write(data)
self.dummy_sendfile.seek(0)
self.client.voidcmd('TYPE I')
conn = self.client.transfercmd('stor ' + TESTFN)
bytes_sent = 0
while 1:
chunk = self.dummy_sendfile.read(8192)
conn.sendall(chunk)
bytes_sent += len(chunk)
# stop transfer while it isn't finished yet
if bytes_sent >= 524288 or not chunk:
break
conn.close()
# transfer wasn't finished yet but server can't know this,
# hence expect a 226 response
self.assertEqual('226', self.client.voidresp()[:3])
# resuming transfer by using a marker value greater than the
# file size stored on the server should result in an error
# on stor
file_size = self.client.size(TESTFN)
self.assertEqual(file_size, bytes_sent)
self.client.sendcmd('rest %s' % ((file_size + 1)))
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'stor ' + TESTFN)
self.client.sendcmd('rest %s' % bytes_sent)
self.client.storbinary('stor ' + TESTFN, self.dummy_sendfile)
self.client.retrbinary('retr ' + TESTFN, self.dummy_recvfile.write)
self.dummy_sendfile.seek(0)
self.dummy_recvfile.seek(0)
self.assertEqual(hash(self.dummy_sendfile.read()),
hash(self.dummy_recvfile.read())
)
self.client.delete(TESTFN)
def test_failing_rest_on_stor(self):
# Test REST -> STOR against a non existing file.
if os.path.exists(TESTFN):
self.client.delete(TESTFN)
self.client.sendcmd('type i')
self.client.sendcmd('rest 10')
self.assertRaises(ftplib.error_perm, self.client.storbinary,
'stor ' + TESTFN, lambda x: x)
# if the first STOR failed because of REST, the REST marker
# is supposed to be resetted to 0
self.dummy_sendfile.write('x' * 4096)
self.dummy_sendfile.seek(0)
self.client.storbinary('stor ' + TESTFN, self.dummy_sendfile)
def test_quit_during_transfer(self):
# RFC-959 states that if QUIT is sent while a transfer is in
# progress, the connection must remain open for result response
# and the server will then close it.
conn = self.client.transfercmd('stor ' + TESTFN)
conn.sendall('abcde12345' * 50000)
self.client.sendcmd('quit')
conn.sendall('abcde12345' * 50000)
conn.close()
# expect the response (transfer ok)
self.assertEqual('226', self.client.voidresp()[:3])
# Make sure client has been disconnected.
# socket.error (Windows) or EOFError (Linux) exception is supposed
# to be raised in such a case.
self.assertRaises((socket.error, EOFError), self.client.sendcmd, 'noop')
def test_stor_empty_file(self):
self.client.storbinary('stor ' + TESTFN, self.dummy_sendfile)
self.client.quit()
f = open(TESTFN)
self.assertEqual(f.read(), "")
f.close()
if SUPPORTS_SENDFILE:
class TestFtpStoreDataNoSendfile(TestFtpStoreData):
"""Test STOR, STOU, APPE, REST, TYPE not using sendfile()."""
def setUp(self):
TestFtpStoreData.setUp(self)
self.server.handler.use_sendfile = False
def tearDown(self):
TestFtpStoreData.tearDown(self)
self.server.handler.use_sendfile = True
class TestFtpRetrieveData(unittest.TestCase):
"Test RETR, REST, TYPE"
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
self.server = self.server_class()
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
self.file = open(TESTFN, 'w+b')
self.dummyfile = StringIO.StringIO()
def tearDown(self):
self.client.close()
self.server.stop()
if not self.file.closed:
self.file.close()
if not self.dummyfile.closed:
self.dummyfile.close()
safe_remove(TESTFN)
def test_retr(self):
data = 'abcde12345' * 100000
self.file.write(data)
self.file.close()
self.client.retrbinary("retr " + TESTFN, self.dummyfile.write)
self.dummyfile.seek(0)
self.assertEqual(hash(data), hash(self.dummyfile.read()))
# attempt to retrieve a file which doesn't exist
bogus = os.path.basename(tempfile.mktemp(dir=HOME))
self.assertRaises(ftplib.error_perm, self.client.retrbinary,
"retr " + bogus, lambda x: x)
def test_retr_ascii(self):
# Test RETR in ASCII mode.
def retrieve(cmd, callback, blocksize=8192, rest=None):
# like retrbinary but uses TYPE A instead
self.client.voidcmd('type a')
conn = self.client.transfercmd(cmd, rest)
while 1:
data = conn.recv(blocksize)
if not data:
break
callback(data)
conn.close()
return self.client.voidresp()
data = ('abcde12345' + os.linesep) * 100000
self.file.write(data)
self.file.close()
retrieve("retr " + TESTFN, self.dummyfile.write)
expected = data.replace(os.linesep, '\r\n')
self.dummyfile.seek(0)
self.assertEqual(hash(expected), hash(self.dummyfile.read()))
def test_restore_on_retr(self):
data = 'abcde12345' * 1000000
self.file.write(data)
self.file.close()
received_bytes = 0
self.client.voidcmd('TYPE I')
conn = self.client.transfercmd('retr ' + TESTFN)
while 1:
chunk = conn.recv(8192)
if not chunk:
break
self.dummyfile.write(chunk)
received_bytes += len(chunk)
if received_bytes >= len(data) // 2:
break
conn.close()
# transfer wasn't finished yet so we expect a 426 response
self.assertEqual(self.client.getline()[:3], "426")
# resuming transfer by using a marker value greater than the
# file size stored on the server should result in an error
# on retr (RFC-1123)
file_size = self.client.size(TESTFN)
self.client.sendcmd('rest %s' % ((file_size + 1)))
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'retr ' + TESTFN)
# test resume
self.client.sendcmd('rest %s' % received_bytes)
self.client.retrbinary("retr " + TESTFN, self.dummyfile.write)
self.dummyfile.seek(0)
self.assertEqual(hash(data), hash (self.dummyfile.read()))
def test_retr_empty_file(self):
self.client.retrbinary("retr " + TESTFN, self.dummyfile.write)
self.dummyfile.seek(0)
self.assertEqual(self.dummyfile.read(), "")
if SUPPORTS_SENDFILE:
class TestFtpRetrieveDataNoSendfile(TestFtpRetrieveData):
"""Test RETR, REST, TYPE by not using sendfile()."""
def setUp(self):
TestFtpRetrieveData.setUp(self)
self.server.handler.use_sendfile = False
def tearDown(self):
TestFtpRetrieveData.tearDown(self)
self.server.handler.use_sendfile = True
class TestFtpListingCmds(unittest.TestCase):
"""Test LIST, NLST, argumented STAT."""
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
self.server = self.server_class()
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
touch(TESTFN)
def tearDown(self):
self.client.close()
self.server.stop()
os.remove(TESTFN)
def _test_listing_cmds(self, cmd):
"""Tests common to LIST NLST and MLSD commands."""
# assume that no argument has the same meaning of "/"
l1 = l2 = []
self.client.retrlines(cmd, l1.append)
self.client.retrlines(cmd + ' /', l2.append)
self.assertEqual(l1, l2)
if cmd.lower() != 'mlsd':
# if pathname is a file one line is expected
x = []
self.client.retrlines('%s ' % cmd + TESTFN, x.append)
self.assertEqual(len(x), 1)
self.assertTrue(''.join(x).endswith(TESTFN))
# non-existent path, 550 response is expected
bogus = os.path.basename(tempfile.mktemp(dir=HOME))
self.assertRaises(ftplib.error_perm, self.client.retrlines,
'%s ' %cmd + bogus, lambda x: x)
# for an empty directory we excpect that the data channel is
# opened anyway and that no data is received
x = []
tempdir = os.path.basename(tempfile.mkdtemp(dir=HOME))
try:
self.client.retrlines('%s %s' % (cmd, tempdir), x.append)
self.assertEqual(x, [])
finally:
try:
os.rmdir(tempdir)
except OSError:
pass
def test_nlst(self):
# common tests
self._test_listing_cmds('nlst')
def test_list(self):
# common tests
self._test_listing_cmds('list')
# known incorrect pathname arguments (e.g. old clients) are
# expected to be treated as if pathname would be == '/'
l1 = l2 = l3 = l4 = l5 = []
self.client.retrlines('list /', l1.append)
self.client.retrlines('list -a', l2.append)
self.client.retrlines('list -l', l3.append)
self.client.retrlines('list -al', l4.append)
self.client.retrlines('list -la', l5.append)
tot = (l1, l2, l3, l4, l5)
for x in range(len(tot) - 1):
self.assertEqual(tot[x], tot[x+1])
def test_mlst(self):
# utility function for extracting the line of interest
mlstline = lambda cmd: self.client.voidcmd(cmd).split('\n')[1]
# the fact set must be preceded by a space
self.assertTrue(mlstline('mlst').startswith(' '))
# where TVFS is supported, a fully qualified pathname is expected
self.assertTrue(mlstline('mlst ' + TESTFN).endswith('/' + TESTFN))
self.assertTrue(mlstline('mlst').endswith('/'))
# assume that no argument has the same meaning of "/"
self.assertEqual(mlstline('mlst'), mlstline('mlst /'))
# non-existent path
bogus = os.path.basename(tempfile.mktemp(dir=HOME))
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'mlst '+bogus)
# test file/dir notations
self.assertTrue('type=dir' in mlstline('mlst'))
self.assertTrue('type=file' in mlstline('mlst ' + TESTFN))
# let's add some tests for OPTS command
self.client.sendcmd('opts mlst type;')
self.assertEqual(mlstline('mlst'), ' type=dir; /')
# where no facts are present, two leading spaces before the
# pathname are required (RFC-3659)
self.client.sendcmd('opts mlst')
self.assertEqual(mlstline('mlst'), ' /')
def test_mlsd(self):
# common tests
self._test_listing_cmds('mlsd')
dir = os.path.basename(tempfile.mkdtemp(dir=HOME))
try:
try:
self.client.retrlines('mlsd ' + TESTFN, lambda x: x)
except ftplib.error_perm, resp:
# if path is a file a 501 response code is expected
self.assertEqual(str(resp)[0:3], "501")
else:
self.fail("Exception not raised")
finally:
safe_rmdir(dir)
def test_mlsd_all_facts(self):
feat = self.client.sendcmd('feat')
# all the facts
facts = re.search(r'^\s*MLST\s+(\S+)$', feat, re.MULTILINE).group(1)
facts = facts.replace("*;", ";")
self.client.sendcmd('opts mlst ' + facts)
resp = self.client.sendcmd('mlst')
local = facts[:-1].split(";")
returned = resp.split("\n")[1].strip()[:-3]
returned = [x.split("=")[0] for x in returned.split(";")]
self.assertEqual(sorted(local), sorted(returned))
self.assertTrue("type" in resp)
self.assertTrue("size" in resp)
self.assertTrue("perm" in resp)
self.assertTrue("modify" in resp)
if os.name == 'posix':
self.assertTrue("unique" in resp)
self.assertTrue("unix.mode" in resp)
self.assertTrue("unix.uid" in resp)
self.assertTrue("unix.gid" in resp)
elif os.name == 'nt':
self.assertTrue("create" in resp)
def test_stat(self):
# Test STAT provided with argument which is equal to LIST
self.client.sendcmd('stat /')
self.client.sendcmd('stat ' + TESTFN)
self.client.putcmd('stat *')
resp = self.client.getmultiline()
self.assertEqual(resp, '550 Globbing not supported.')
bogus = os.path.basename(tempfile.mktemp(dir=HOME))
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'stat ' + bogus)
def test_unforeseen_time_event(self):
# Emulate a case where the file last modification time is prior
# to year 1900. This most likely will never happen unless
# someone specifically force the last modification time of a
# file in some way.
# To do so we temporarily override os.path.getmtime so that it
# returns a negative value referring to a year prior to 1900.
# It causes time.localtime/gmtime to raise a ValueError exception
# which is supposed to be handled by server.
_getmtime = ftpserver.AbstractedFS.getmtime
try:
ftpserver.AbstractedFS.getmtime = lambda x, y: -9000000000
self.client.sendcmd('stat /') # test AbstractedFS.format_list()
self.client.sendcmd('mlst /') # test AbstractedFS.format_mlsx()
# make sure client hasn't been disconnected
self.client.sendcmd('noop')
finally:
ftpserver.AbstractedFS.getmtime = _getmtime
class TestFtpAbort(unittest.TestCase):
"test: ABOR"
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
self.server = self.server_class()
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
def tearDown(self):
self.client.close()
self.server.stop()
def test_abor_no_data(self):
# Case 1: ABOR while no data channel is opened: respond with 225.
resp = self.client.sendcmd('ABOR')
self.assertEqual('225 No transfer to abort.', resp)
self.client.retrlines('list', [].append)
def test_abor_pasv(self):
# Case 2: user sends a PASV, a data-channel socket is listening
# but not connected, and ABOR is sent: close listening data
# socket, respond with 225.
self.client.makepasv()
respcode = self.client.sendcmd('ABOR')[:3]
self.assertEqual('225', respcode)
self.client.retrlines('list', [].append)
def test_abor_port(self):
# Case 3: data channel opened with PASV or PORT, but ABOR sent
# before a data transfer has been started: close data channel,
# respond with 225
self.client.set_pasv(0)
sock = self.client.makeport()
respcode = self.client.sendcmd('ABOR')[:3]
sock.close()
self.assertEqual('225', respcode)
self.client.retrlines('list', [].append)
def test_abor_during_transfer(self):
# Case 4: ABOR while a data transfer on DTP channel is in
# progress: close data channel, respond with 426, respond
# with 226.
data = 'abcde12345' * 1000000
f = open(TESTFN, 'w+b')
f.write(data)
f.close()
try:
self.client.voidcmd('TYPE I')
conn = self.client.transfercmd('retr ' + TESTFN)
bytes_recv = 0
while bytes_recv < 65536:
chunk = conn.recv(8192)
bytes_recv += len(chunk)
# stop transfer while it isn't finished yet
self.client.putcmd('ABOR')
# transfer isn't finished yet so ftpd should respond with 426
self.assertEqual(self.client.getline()[:3], "426")
# transfer successfully aborted, so should now respond with a 226
self.assertEqual('226', self.client.voidresp()[:3])
finally:
# We do not use os.remove() because file could still be
# locked by ftpd thread. If DELE through FTP fails try
# os.remove() as last resort.
try:
self.client.delete(TESTFN)
except (ftplib.Error, EOFError, socket.error):
safe_remove(TESTFN)
if hasattr(socket, 'MSG_OOB'):
def test_oob_abor(self):
# Send ABOR by following the RFC-959 directives of sending
# Telnet IP/Synch sequence as OOB data.
# On some systems like FreeBSD this happened to be a problem
# due to a different SO_OOBINLINE behavior.
# On some platforms (e.g. Python CE) the test may fail
# although the MSG_OOB constant is defined.
self.client.sock.sendall(chr(244), socket.MSG_OOB)
self.client.sock.sendall(chr(255), socket.MSG_OOB)
self.client.sock.sendall('abor\r\n')
self.client.sock.settimeout(1)
self.assertEqual(self.client.getresp()[:3], '225')
class TestTimeouts(unittest.TestCase):
"""Test idle-timeout capabilities of control and data channels.
Some tests may fail on slow machines.
"""
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
self.server = None
self.client = None
def _setUp(self, idle_timeout=300, data_timeout=300, pasv_timeout=30,
port_timeout=30):
self.server = self.server_class()
self.server.handler.timeout = idle_timeout
self.server.handler.dtp_handler.timeout = data_timeout
self.server.handler.passive_dtp.timeout = pasv_timeout
self.server.handler.active_dtp.timeout = port_timeout
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
def tearDown(self):
if self.client is not None and self.server is not None:
self.client.close()
self.server.handler.timeout = 300
self.server.handler.dtp_handler.timeout = 300
self.server.handler.passive_dtp.timeout = 30
self.server.handler.active_dtp.timeout = 30
self.server.stop()
def test_idle_timeout(self):
# Test control channel timeout. The client which does not send
# any command within the time specified in FTPHandler.timeout is
# supposed to be kicked off.
self._setUp(idle_timeout=0.1)
# fail if no msg is received within 1 second
self.client.sock.settimeout(1)
data = self.client.sock.recv(1024)
self.assertEqual(data, "421 Control connection timed out.\r\n")
# ensure client has been kicked off
self.assertRaises((socket.error, EOFError), self.client.sendcmd, 'noop')
def test_data_timeout(self):
# Test data channel timeout. The client which does not send
# or receive any data within the time specified in
# DTPHandler.timeout is supposed to be kicked off.
self._setUp(data_timeout=0.1)
addr = self.client.makepasv()
s = socket.socket()
s.connect(addr)
# fail if no msg is received within 1 second
self.client.sock.settimeout(1)
data = self.client.sock.recv(1024)
self.assertEqual(data, "421 Data connection timed out.\r\n")
# ensure client has been kicked off
self.assertRaises((socket.error, EOFError), self.client.sendcmd, 'noop')
def test_data_timeout_not_reached(self):
# Impose a timeout for the data channel, then keep sending data for a
# time which is longer than that to make sure that the code checking
# whether the transfer stalled for with no progress is executed.
self._setUp(data_timeout=0.1)
sock = self.client.transfercmd('stor ' + TESTFN)
if hasattr(self.client_class, 'ssl_version'):
sock = ssl.wrap_socket(sock)
try:
stop_at = time.time() + 0.2
while time.time() < stop_at:
sock.send('x' * 1024)
sock.close()
self.client.voidresp()
finally:
if os.path.exists(TESTFN):
self.client.delete(TESTFN)
def test_idle_data_timeout1(self):
# Tests that the control connection timeout is suspended while
# the data channel is opened
self._setUp(idle_timeout=0.1, data_timeout=0.2)
addr = self.client.makepasv()
s = socket.socket()
s.connect(addr)
# fail if no msg is received within 1 second
self.client.sock.settimeout(1)
data = self.client.sock.recv(1024)
self.assertEqual(data, "421 Data connection timed out.\r\n")
# ensure client has been kicked off
self.assertRaises((socket.error, EOFError), self.client.sendcmd, 'noop')
def test_idle_data_timeout2(self):
# Tests that the control connection timeout is restarted after
# data channel has been closed
self._setUp(idle_timeout=0.1, data_timeout=0.2)
addr = self.client.makepasv()
s = socket.socket()
s.connect(addr)
# close data channel
self.client.sendcmd('abor')
self.client.sock.settimeout(1)
data = self.client.sock.recv(1024)
self.assertEqual(data, "421 Control connection timed out.\r\n")
# ensure client has been kicked off
self.assertRaises((socket.error, EOFError), self.client.sendcmd, 'noop')
def test_pasv_timeout(self):
# Test pasv data channel timeout. The client which does not
# connect to the listening data socket within the time specified
# in PassiveDTP.timeout is supposed to receive a 421 response.
self._setUp(pasv_timeout=0.1)
self.client.makepasv()
# fail if no msg is received within 1 second
self.client.sock.settimeout(1)
data = self.client.sock.recv(1024)
self.assertEqual(data, "421 Passive data channel timed out.\r\n")
# client is not expected to be kicked off
self.client.sendcmd('noop')
def test_disabled_idle_timeout(self):
self._setUp(idle_timeout=0)
self.client.sendcmd('noop')
def test_disabled_data_timeout(self):
self._setUp(data_timeout=0)
addr = self.client.makepasv()
s = socket.socket()
s.connect(addr)
s.close()
def test_disabled_pasv_timeout(self):
self._setUp(pasv_timeout=0)
self.client.makepasv()
# reset passive socket
addr = self.client.makepasv()
s = socket.socket()
s.connect(addr)
s.close()
def test_disabled_port_timeout(self):
self._setUp(port_timeout=0)
s1 =self.client.makeport()
s2 = self.client.makeport()
s1.close()
s2.close()
class TestConfigurableOptions(unittest.TestCase):
"""Test those daemon options which are commonly modified by user."""
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
touch(TESTFN)
self.server = self.server_class()
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
def tearDown(self):
os.remove(TESTFN)
# set back options to their original value
self.server.server.max_cons = 0
self.server.server.max_cons_per_ip = 0
self.server.handler.banner = "pyftpdlib %s ready." % ftpserver.__ver__
self.server.handler.max_login_attempts = 3
self.server.handler._auth_failed_timeout = 5
self.server.handler.masquerade_address = None
self.server.handler.masquerade_address_map = {}
self.server.handler.permit_privileged_ports = False
self.server.handler.passive_ports = None
self.server.handler.use_gmt_times = True
self.server.handler.tcp_no_delay = hasattr(socket, 'TCP_NODELAY')
self.server.stop()
def test_max_connections(self):
# Test FTPServer.max_cons attribute
self.server.server.max_cons = 3
self.client.quit()
c1 = self.client_class()
c2 = self.client_class()
c3 = self.client_class()
try:
c1.connect(self.server.host, self.server.port)
c2.connect(self.server.host, self.server.port)
self.assertRaises(ftplib.error_temp, c3.connect, self.server.host,
self.server.port)
# with passive data channel established
c2.quit()
c1.login(USER, PASSWD)
c1.makepasv()
self.assertRaises(ftplib.error_temp, c2.connect, self.server.host,
self.server.port)
# with passive data socket waiting for connection
c1.login(USER, PASSWD)
c1.sendcmd('pasv')
self.assertRaises(ftplib.error_temp, c2.connect, self.server.host,
self.server.port)
# with active data channel established
c1.login(USER, PASSWD)
sock = c1.makeport()
self.assertRaises(ftplib.error_temp, c2.connect, self.server.host,
self.server.port)
sock.close()
finally:
c1.close()
c2.close()
c3.close()
def test_max_connections_per_ip(self):
# Test FTPServer.max_cons_per_ip attribute
self.server.server.max_cons_per_ip = 3
self.client.quit()
c1 = self.client_class()
c2 = self.client_class()
c3 = self.client_class()
c4 = self.client_class()
try:
c1.connect(self.server.host, self.server.port)
c2.connect(self.server.host, self.server.port)
c3.connect(self.server.host, self.server.port)
self.assertRaises(ftplib.error_temp, c4.connect, self.server.host,
self.server.port)
# Make sure client has been disconnected.
# socket.error (Windows) or EOFError (Linux) exception is
# supposed to be raised in such a case.
self.assertRaises((socket.error, EOFError), c4.sendcmd, 'noop')
finally:
c1.close()
c2.close()
c3.close()
c4.close()
def test_banner(self):
# Test FTPHandler.banner attribute
self.server.handler.banner = 'hello there'
self.client.close()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.assertEqual(self.client.getwelcome()[4:], 'hello there')
def test_max_login_attempts(self):
# Test FTPHandler.max_login_attempts attribute.
self.server.handler.max_login_attempts = 1
self.server.handler._auth_failed_timeout = 0
self.assertRaises(ftplib.error_perm, self.client.login, 'wrong', 'wrong')
# socket.error (Windows) or EOFError (Linux) exceptions are
# supposed to be raised when attempting to send/recv some data
# using a disconnected socket
self.assertRaises((socket.error, EOFError), self.client.sendcmd, 'noop')
def test_masquerade_address(self):
# Test FTPHandler.masquerade_address attribute
host, port = self.client.makepasv()
self.assertEqual(host, self.server.host)
self.server.handler.masquerade_address = "256.256.256.256"
host, port = self.client.makepasv()
self.assertEqual(host, "256.256.256.256")
def test_masquerade_address_map(self):
# Test FTPHandler.masquerade_address_map attribute
host, port = self.client.makepasv()
self.assertEqual(host, self.server.host)
self.server.handler.masquerade_address_map = {self.server.host :
"128.128.128.128"}
host, port = self.client.makepasv()
self.assertEqual(host, "128.128.128.128")
def test_passive_ports(self):
# Test FTPHandler.passive_ports attribute
_range = range(40000, 60000, 200)
self.server.handler.passive_ports = _range
self.assert_(self.client.makepasv()[1] in _range)
self.assert_(self.client.makepasv()[1] in _range)
self.assert_(self.client.makepasv()[1] in _range)
self.assert_(self.client.makepasv()[1] in _range)
def test_passive_ports_busy(self):
# If the ports in the configured range are busy it is expected
# that a kernel-assigned port gets chosen
s = socket.socket()
s.bind((HOST, 0))
port = s.getsockname()[1]
self.server.handler.passive_ports = [port]
resulting_port = self.client.makepasv()[1]
self.assert_(port != resulting_port)
def test_permit_privileged_ports(self):
# Test FTPHandler.permit_privileged_ports_active attribute
# try to bind a socket on a privileged port
sock = None
for port in reversed(range(1, 1024)):
try:
socket.getservbyport(port)
except socket.error, err:
# not registered port; go on
try:
sock = socket.socket(self.client.af, socket.SOCK_STREAM)
sock.bind((HOST, port))
break
except socket.error, err:
if err.args[0] == errno.EACCES:
# root privileges needed
sock = None
break
sock.close()
continue
else:
# registered port found; skip to the next one
continue
else:
# no usable privileged port was found
sock = None
try:
self.server.handler.permit_privileged_ports = False
self.assertRaises(ftplib.error_perm, self.client.sendport, HOST,
port)
if sock:
port = sock.getsockname()[1]
self.server.handler.permit_privileged_ports = True
sock.listen(5)
sock.settimeout(2)
self.client.sendport(HOST, port)
sock.accept()
finally:
if sock is not None:
sock.close()
def test_use_gmt_times(self):
# use GMT time
self.server.handler.use_gmt_times = True
gmt1 = self.client.sendcmd('mdtm ' + TESTFN)
gmt2 = self.client.sendcmd('mlst ' + TESTFN)
gmt3 = self.client.sendcmd('stat ' + TESTFN)
# use local time
self.server.handler.use_gmt_times = False
self.client.quit()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
loc1 = self.client.sendcmd('mdtm ' + TESTFN)
loc2 = self.client.sendcmd('mlst ' + TESTFN)
loc3 = self.client.sendcmd('stat ' + TESTFN)
# if we're not in a GMT time zone times are supposed to be
# different
if time.timezone != 0:
self.assertNotEqual(gmt1, loc1)
self.assertNotEqual(gmt2, loc2)
self.assertNotEqual(gmt3, loc3)
# ...otherwise they should be the same
else:
self.assertEqual(gmt1, loc1)
self.assertEqual(gmt2, loc2)
self.assertEqual(gmt3, loc3)
if hasattr(socket, 'TCP_NODELAY'):
def test_tcp_no_delay(self):
def get_handler_socket():
# return the server's handler socket object
for fd in asyncore.socket_map:
instance = asyncore.socket_map[fd]
if isinstance(instance, ftpserver.FTPHandler):
break
return instance.socket
s = get_handler_socket()
self.assertTrue(s.getsockopt(socket.SOL_TCP, socket.TCP_NODELAY))
self.client.quit()
self.server.handler.tcp_no_delay = False
self.client.connect(self.server.host, self.server.port)
self.client.sendcmd('noop')
s = get_handler_socket()
self.assertFalse(s.getsockopt(socket.SOL_TCP, socket.TCP_NODELAY))
class TestCallbacks(unittest.TestCase):
"""Test FTPHandler class callback methods."""
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
self.client = None
self.server = None
self._tearDown = True
def _setUp(self, handler, login=True):
FTPd.handler = handler
self.server = self.server_class()
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
if login:
self.client.login(USER, PASSWD)
self.file = open(TESTFN, 'w+b')
self.dummyfile = StringIO.StringIO()
self._tearDown = False
def tearDown(self):
if not self._tearDown:
FTPd.handler = ftpserver.FTPHandler
self._tearDown = True
if self.client is not None:
self.client.close()
if self.server is not None:
self.server.stop()
if not self.file.closed:
self.file.close()
if not self.dummyfile.closed:
self.dummyfile.close()
os.remove(TESTFN)
def test_on_file_sent(self):
_file = []
class TestHandler(ftpserver.FTPHandler):
def on_file_sent(self, file):
_file.append(file)
def on_file_received(self, file):
raise Exception
def on_incomplete_file_sent(self, file):
raise Exception
def on_incomplete_file_received(self, file):
raise Exception
self._setUp(TestHandler)
data = 'abcde12345' * 100000
self.file.write(data)
self.file.close()
self.client.retrbinary("retr " + TESTFN, lambda x: x)
# shut down the server to avoid race conditions
self.tearDown()
self.assertEqual(_file, [os.path.abspath(TESTFN)])
def test_on_file_received(self):
_file = []
class TestHandler(ftpserver.FTPHandler):
def on_file_sent(self, file):
raise Exception
def on_file_received(self, file):
_file.append(file)
def on_incomplete_file_sent(self, file):
raise Exception
def on_incomplete_file_received(self, file):
raise Exception
self._setUp(TestHandler)
data = 'abcde12345' * 100000
self.dummyfile.write(data)
self.dummyfile.seek(0)
self.client.storbinary('stor ' + TESTFN, self.dummyfile)
# shut down the server to avoid race conditions
self.tearDown()
self.assertEqual(_file, [os.path.abspath(TESTFN)])
def test_on_incomplete_file_sent(self):
_file = []
class TestHandler(ftpserver.FTPHandler):
def on_file_sent(self, file):
raise Exception
def on_file_received(self, file):
raise Exception
def on_incomplete_file_sent(self, file):
_file.append(file)
def on_incomplete_file_received(self, file):
raise Exception
self._setUp(TestHandler)
data = 'abcde12345' * 100000
self.file.write(data)
self.file.close()
bytes_recv = 0
conn = self.client.transfercmd("retr " + TESTFN, None)
while 1:
chunk = conn.recv(8192)
bytes_recv += len(chunk)
if bytes_recv >= 524288 or not chunk:
break
conn.close()
self.assertEqual(self.client.getline()[:3], "426")
# shut down the server to avoid race conditions
self.tearDown()
self.assertEqual(_file, [os.path.abspath(TESTFN)])
def test_on_incomplete_file_received(self):
_file = []
class TestHandler(ftpserver.FTPHandler):
def on_file_sent(self, file):
raise Exception
def on_file_received(self, file):
raise Exception
def on_incomplete_file_sent(self, file):
raise Exception
def on_incomplete_file_received(self, file):
_file.append(file)
self._setUp(TestHandler)
data = 'abcde12345' * 100000
self.dummyfile.write(data)
self.dummyfile.seek(0)
conn = self.client.transfercmd('stor ' + TESTFN)
bytes_sent = 0
while 1:
chunk = self.dummyfile.read(8192)
conn.sendall(chunk)
bytes_sent += len(chunk)
# stop transfer while it isn't finished yet
if bytes_sent >= 524288 or not chunk:
self.client.putcmd('abor')
break
conn.close()
# shut down the server to avoid race conditions
self.tearDown()
self.assertEqual(_file, [os.path.abspath(TESTFN)])
def test_on_login(self):
user = []
class TestHandler(ftpserver.FTPHandler):
_auth_failed_timeout = 0
def on_login(self, username):
user.append(username)
def on_login_failed(self, username, password):
raise Exception
self._setUp(TestHandler)
# shut down the server to avoid race conditions
self.tearDown()
self.assertEqual(user, [USER])
def test_on_login_failed(self):
pair = []
class TestHandler(ftpserver.FTPHandler):
_auth_failed_timeout = 0
def on_login(self, username):
raise Exception
def on_login_failed(self, username, password):
pair.append((username, password))
self._setUp(TestHandler, login=False)
self.assertRaises(ftplib.error_perm, self.client.login, 'foo', 'bar')
# shut down the server to avoid race conditions
self.tearDown()
self.assertEqual(pair, [('foo', 'bar')])
def test_on_login_failed(self):
pair = []
class TestHandler(ftpserver.FTPHandler):
_auth_failed_timeout = 0
def on_login(self, username):
raise Exception
def on_login_failed(self, username, password):
pair.append((username, password))
self._setUp(TestHandler, login=False)
self.assertRaises(ftplib.error_perm, self.client.login, 'foo', 'bar')
# shut down the server to avoid race conditions
self.tearDown()
self.assertEqual(pair, [('foo', 'bar')])
def test_on_logout_quit(self):
user = []
class TestHandler(ftpserver.FTPHandler):
def on_logout(self, username):
user.append(username)
self._setUp(TestHandler)
self.client.quit()
# shut down the server to avoid race conditions
self.tearDown()
self.assertEqual(user, [USER])
def test_on_logout_rein(self):
user = []
class TestHandler(ftpserver.FTPHandler):
def on_logout(self, username):
user.append(username)
self._setUp(TestHandler)
self.client.sendcmd('rein')
# shut down the server to avoid race conditions
self.tearDown()
self.assertEqual(user, [USER])
def test_on_logout_user_issued_twice(self):
users = []
class TestHandler(ftpserver.FTPHandler):
def on_logout(self, username):
users.append(username)
self._setUp(TestHandler)
# At this point user "user" is logged in. Re-login as anonymous,
# then quit and expect queue == ["user", "anonymous"]
self.client.login("anonymous")
self.client.quit()
# shut down the server to avoid race conditions
self.tearDown()
self.assertEqual(users, [USER, 'anonymous'])
class _TestNetworkProtocols(unittest.TestCase):
"""Test PASV, EPSV, PORT and EPRT commands.
Do not use this class directly, let TestIPv4Environment and
TestIPv6Environment classes use it instead.
"""
server_class = FTPd
client_class = ftplib.FTP
HOST = HOST
def setUp(self):
self.server = self.server_class(self.HOST)
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
if self.client.af == socket.AF_INET:
self.proto = "1"
self.other_proto = "2"
else:
self.proto = "2"
self.other_proto = "1"
def tearDown(self):
self.client.close()
self.server.stop()
def cmdresp(self, cmd):
"""Send a command and return response, also if the command failed."""
try:
return self.client.sendcmd(cmd)
except ftplib.Error, err:
return str(err)
def test_eprt(self):
# test wrong proto
try:
self.client.sendcmd('eprt |%s|%s|%s|' % (self.other_proto,
self.server.host, self.server.port))
except ftplib.error_perm, err:
self.assertEqual(str(err)[0:3], "522")
else:
self.fail("Exception not raised")
# test bad args
msg = "501 Invalid EPRT format."
# len('|') > 3
self.assertEqual(self.cmdresp('eprt ||||'), msg)
# len('|') < 3
self.assertEqual(self.cmdresp('eprt ||'), msg)
# port > 65535
self.assertEqual(self.cmdresp('eprt |%s|%s|65536|' % (self.proto,
self.HOST)), msg)
# port < 0
self.assertEqual(self.cmdresp('eprt |%s|%s|-1|' % (self.proto,
self.HOST)), msg)
# port < 1024
self.assertEqual(self.cmdresp('eprt |%s|%s|222|' % (self.proto,
self.HOST)), "501 Can't connect over a privileged port.")
# proto > 2
_cmd = 'eprt |3|%s|%s|' % (self.server.host, self.server.port)
self.assertRaises(ftplib.error_perm, self.client.sendcmd, _cmd)
if self.proto == '1':
# len(ip.octs) > 4
self.assertEqual(self.cmdresp('eprt |1|1.2.3.4.5|2048|'), msg)
# ip.oct > 255
self.assertEqual(self.cmdresp('eprt |1|1.2.3.256|2048|'), msg)
# bad proto
resp = self.cmdresp('eprt |2|1.2.3.256|2048|')
self.assert_("Network protocol not supported" in resp)
# test connection
sock = socket.socket(self.client.af)
sock.bind((self.client.sock.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
ip, port = sock.getsockname()[:2]
self.client.sendcmd('eprt |%s|%s|%s|' % (self.proto, ip, port))
try:
try:
sock.accept()
except socket.timeout:
self.fail("Server didn't connect to passive socket")
finally:
sock.close()
def test_epsv(self):
# test wrong proto
try:
self.client.sendcmd('epsv ' + self.other_proto)
except ftplib.error_perm, err:
self.assertEqual(str(err)[0:3], "522")
else:
self.fail("Exception not raised")
# proto > 2
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'epsv 3')
# test connection
for cmd in ('EPSV', 'EPSV ' + self.proto):
host, port = ftplib.parse229(self.client.sendcmd(cmd),
self.client.sock.getpeername())
s = socket.socket(self.client.af, socket.SOCK_STREAM)
s.settimeout(2)
try:
s.connect((host, port))
self.client.sendcmd('abor')
finally:
s.close()
def test_epsv_all(self):
self.client.sendcmd('epsv all')
self.assertRaises(ftplib.error_perm, self.client.sendcmd, 'pasv')
self.assertRaises(ftplib.error_perm, self.client.sendport, self.HOST, 2000)
self.assertRaises(ftplib.error_perm, self.client.sendcmd,
'eprt |%s|%s|%s|' % (self.proto, self.HOST, 2000))
class TestIPv4Environment(_TestNetworkProtocols):
"""Test PASV, EPSV, PORT and EPRT commands.
Runs tests contained in _TestNetworkProtocols class by using IPv4
plus some additional specific tests.
"""
server_class = FTPd
client_class = ftplib.FTP
HOST = '127.0.0.1'
def test_port_v4(self):
# test connection
sock = self.client.makeport()
self.client.sendcmd('abor')
sock.close()
# test bad arguments
ae = self.assertEqual
msg = "501 Invalid PORT format."
ae(self.cmdresp('port 127,0,0,1,1.1'), msg) # sep != ','
ae(self.cmdresp('port X,0,0,1,1,1'), msg) # value != int
ae(self.cmdresp('port 127,0,0,1,1,1,1'), msg) # len(args) > 6
ae(self.cmdresp('port 127,0,0,1'), msg) # len(args) < 6
ae(self.cmdresp('port 256,0,0,1,1,1'), msg) # oct > 255
ae(self.cmdresp('port 127,0,0,1,256,1'), msg) # port > 65535
ae(self.cmdresp('port 127,0,0,1,-1,0'), msg) # port < 0
msg = "501 Can't connect over a privileged port."
ae(self.cmdresp('port %s,1,1' % self.HOST.replace('.',',')),msg) # port < 1024
if "1.2.3.4" != self.HOST:
msg = "501 Can't connect to a foreign address."
ae(self.cmdresp('port 1,2,3,4,4,4'), msg)
def test_eprt_v4(self):
self.assertEqual(self.cmdresp('eprt |1|0.10.10.10|2222|'),
"501 Can't connect to a foreign address.")
def test_pasv_v4(self):
host, port = ftplib.parse227(self.client.sendcmd('pasv'))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(2)
try:
s.connect((host, port))
finally:
s.close()
class TestIPv6Environment(_TestNetworkProtocols):
"""Test PASV, EPSV, PORT and EPRT commands.
Runs tests contained in _TestNetworkProtocols class by using IPv6
plus some additional specific tests.
"""
server_class = FTPd
client_class = ftplib.FTP
HOST = '::1'
def test_port_v6(self):
# PORT is not supposed to work
self.assertRaises(ftplib.error_perm, self.client.sendport,
self.server.host, self.server.port)
def test_pasv_v6(self):
# PASV is still supposed to work to support clients using
# IPv4 connecting to a server supporting both IPv4 and IPv6
self.client.makepasv()
def test_eprt_v6(self):
self.assertEqual(self.cmdresp('eprt |2|::foo|2222|'),
"501 Can't connect to a foreign address.")
class TestIPv6MixedEnvironment(unittest.TestCase):
"""By running the server by specifying "::" as IP address the
server is supposed to listen on all interfaces, supporting both
IPv4 and IPv6 by using a single socket.
What we are going to do here is starting the server in this
manner and try to connect by using an IPv4 client.
"""
server_class = FTPd
client_class = ftplib.FTP
HOST = "::"
def setUp(self):
self.server = self.server_class(self.HOST)
self.server.start()
self.client = None
def tearDown(self):
if self.client is not None:
self.client.close()
self.server.stop()
def test_port_v4(self):
noop = lambda x: x
self.client = self.client_class()
self.client.connect('127.0.0.1', self.server.port)
self.client.set_pasv(False)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
self.client.retrlines('list', noop)
def test_pasv_v4(self):
noop = lambda x: x
self.client = self.client_class()
self.client.connect('127.0.0.1', self.server.port)
self.client.set_pasv(True)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
self.client.retrlines('list', noop)
# make sure pasv response doesn't return an IPv4-mapped address
ip = self.client.makepasv()[0]
self.assertFalse(ip.startswith("::ffff:"))
class TestCornerCases(unittest.TestCase):
"""Tests for any kind of strange situation for the server to be in,
mainly referring to bugs signaled on the bug tracker.
"""
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
self.server = self.server_class()
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
def tearDown(self):
self.client.close()
if self.server.running:
self.server.stop()
def test_port_race_condition(self):
# Refers to bug #120, first sends PORT, then disconnects the
# control channel before accept()ing the incoming data connection.
# The original server behavior was to reply with "200 Active
# data connection established" *after* the client had already
# disconnected the control connection.
sock = socket.socket(self.client.af)
sock.bind((self.client.sock.getsockname()[0], 0))
sock.listen(5)
sock.settimeout(2)
host, port = sock.getsockname()[:2]
hbytes = host.split('.')
pbytes = [repr(port // 256), repr(port % 256)]
bytes = hbytes + pbytes
cmd = 'PORT ' + ','.join(bytes)
self.client.sock.sendall(cmd + '\r\n')
self.client.quit()
sock.accept()
sock.close()
def test_stou_max_tries(self):
# Emulates case where the max number of tries to find out a
# unique file name when processing STOU command gets hit.
class TestFS(ftpserver.AbstractedFS):
def mkstemp(self, *args, **kwargs):
raise IOError(errno.EEXIST, "No usable temporary file name found")
self.server.handler.abstracted_fs = TestFS
try:
self.client.quit()
self.client.connect(self.server.host, self.server.port)
self.client.login(USER, PASSWD)
self.assertRaises(ftplib.error_temp, self.client.sendcmd, 'stou')
finally:
self.server.handler.abstracted_fs = ftpserver.AbstractedFS
def test_quick_connect(self):
# Clients that connected and disconnected quickly could cause
# the server to crash, due to a failure to catch errors in the
# initial part of the connection process.
# Tracked in issues #91, #104 and #105.
# See also https://bugs.launchpad.net/zodb/+bug/135108
import struct
def connect(addr):
s = socket.socket()
s.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack('ii', 1, 0))
try:
s.connect(addr)
except socket.error:
pass
s.close()
for x in xrange(10):
connect((self.server.host, self.server.port))
for x in xrange(10):
addr = self.client.makepasv()
connect(addr)
def test_error_on_callback(self):
# test that the server do not crash in case an error occurs
# while firing a scheduled function
self.tearDown()
flag = []
original_logerror = ftpserver.logerror
ftpserver.logerror = lambda msg: flag.append(msg)
server = ftpserver.FTPServer((HOST, 0), ftpserver.FTPHandler)
try:
len1 = len(asyncore.socket_map)
ftpserver.CallLater(0, lambda: 1 // 0)
server.serve_forever(timeout=0, count=1)
len2 = len(asyncore.socket_map)
self.assertEqual(len1, len2)
self.assertTrue(flag)
finally:
ftpserver.logerror = original_logerror
server.close()
def test_active_conn_error(self):
# we open a socket() but avoid to invoke accept() to
# reproduce this error condition:
# http://code.google.com/p/pyftpdlib/source/detail?r=905
sock = socket.socket()
sock.bind((HOST, 0))
port = sock.getsockname()[1]
self.client.sock.settimeout(.1)
try:
resp = self.client.sendport(HOST, port)
except ftplib.error_temp, err:
self.assertEqual(str(err)[:3], '425')
except socket.timeout:
pass
else:
self.assertNotEqual(str(resp)[:3], '200')
class TestUnicodePathNames(unittest.TestCase):
"""Test FTP commands and responses by using path names with non
ASCII characters.
"""
server_class = FTPd
client_class = ftplib.FTP
def setUp(self):
self.server = self.server_class()
self.server.start()
self.client = self.client_class()
self.client.connect(self.server.host, self.server.port)
self.client.sock.settimeout(2)
self.client.login(USER, PASSWD)
self.tempfile = os.path.basename(touch(TESTFN + '☃'))
self.tempdir = TESTFN + '☺'
os.mkdir(self.tempdir)
def tearDown(self):
self.client.close()
self.server.stop()
safe_remove(self.tempfile)
if os.path.exists(self.tempdir):
shutil.rmtree(self.tempdir)
# --- fs operations
def test_cwd(self):
resp = self.client.cwd(self.tempdir)
self.assertTrue(self.tempdir in resp)
def test_mkd(self):
os.rmdir(self.tempdir)
dirname = self.client.mkd(self.tempdir)
self.assertEqual(dirname, '/' + self.tempdir)
self.assertTrue(os.path.isdir(self.tempdir))
def test_rmdir(self):
self.client.rmd(self.tempdir)
def test_dele(self):
self.client.delete(self.tempfile)
self.assertFalse(os.path.exists(self.tempfile))
def test_rnfr_rnto(self):
tempname = TESTFN + '♥'
try:
# rename file
self.client.rename(self.tempfile, tempname)
self.assertTrue(os.path.isfile(tempname))
self.client.rename(tempname, self.tempfile)
# rename dir
self.client.rename(self.tempdir, tempname)
self.assertTrue(os.path.isdir(tempname))
self.client.rename(tempname, self.tempdir)
finally:
safe_remove(tempname)
safe_rmdir(tempname)
def test_size(self):
self.client.sendcmd('type i')
self.client.sendcmd('size ' + self.tempfile)
def test_mdtm(self):
self.client.sendcmd('mdtm ' + self.tempfile)
def test_stou(self):
resp = self.client.sendcmd('stou ' + self.tempfile)
try:
self.assertTrue(self.tempfile in resp)
self.client.quit()
finally:
os.remove(resp.rsplit(' ', 1)[1])
if hasattr(os, 'chmod'):
def test_site_chmod(self):
self.client.sendcmd('site chmod 777 ' + self.tempfile)
# --- listing cmds
def _test_listing_cmds(self, cmd):
ls = []
touch(os.path.join(self.tempdir, self.tempfile))
self.client.retrlines("%s %s" % (cmd, self.tempdir), ls.append)
self.assertTrue(self.tempfile in ls[0])
def test_list(self):
self._test_listing_cmds('list')
def test_nlst(self):
self._test_listing_cmds('nlst')
def test_mlsd(self):
self._test_listing_cmds('mlsd')
def test_mlst(self):
# utility function for extracting the line of interest
mlstline = lambda cmd: self.client.voidcmd(cmd).split('\n')[1]
self.assertTrue('type=dir' in mlstline('mlst ' + self.tempdir))
self.assertTrue('/' + self.tempdir in mlstline('mlst ' + self.tempdir))
self.assertTrue('type=file' in mlstline('mlst ' + self.tempfile))
self.assertTrue('/' + self.tempfile in mlstline('mlst ' + self.tempfile))
# --- file transfer
def test_stor(self):
data = 'abcde12345' * 500
os.remove(self.tempfile)
dummy = StringIO.StringIO()
dummy.write(data)
dummy.seek(0)
self.client.storbinary('stor ' + self.tempfile, dummy)
dummy_recv = StringIO.StringIO()
self.client.retrbinary('retr ' + self.tempfile, dummy_recv.write)
dummy_recv.seek(0)
self.assertEqual(dummy_recv.read(), data)
def test_retr(self):
data = 'abcd1234' * 500
f = open(self.tempfile, 'wb')
f.write(data)
f.close()
dummy = StringIO.StringIO()
self.client.retrbinary('retr ' + self.tempfile, dummy.write)
dummy.seek(0)
self.assertEqual(dummy.read(), data)
# XXX - provisional
def test_encode_decode(self):
self.client.sendcmd('type i')
self.client.sendcmd('size ' + self.tempfile.decode('utf8').encode('utf8'))
class TestCommandLineParser(unittest.TestCase):
"""Test command line parser."""
SYSARGV = sys.argv
STDERR = sys.stderr
def setUp(self):
class DummyFTPServer(ftpserver.FTPServer):
"""An overridden version of FTPServer class which forces
serve_forever() to return immediately.
"""
def serve_forever(self, *args, **kwargs):
return
self.devnull = StringIO.StringIO()
sys.argv = self.SYSARGV[:]
sys.stderr = self.STDERR
self.original_ftpserver_class = ftpserver.FTPServer
ftpserver.FTPServer = DummyFTPServer
def tearDown(self):
self.devnull.close()
sys.argv = self.SYSARGV[:]
sys.stderr = self.STDERR
ftpserver.FTPServer = self.original_ftpserver_class
safe_rmdir(TESTFN)
def test_a_option(self):
sys.argv += ["-i", "localhost", "-p", "0"]
ftpserver.main()
sys.argv = self.SYSARGV[:]
# no argument
sys.argv += ["-a"]
sys.stderr = self.devnull
self.assertRaises(SystemExit, ftpserver.main)
def test_p_option(self):
sys.argv += ["-p", "0"]
ftpserver.main()
# no argument
sys.argv = self.SYSARGV[:]
sys.argv += ["-p"]
sys.stderr = self.devnull
self.assertRaises(SystemExit, ftpserver.main)
# invalid argument
sys.argv += ["-p foo"]
self.assertRaises(SystemExit, ftpserver.main)
def test_w_option(self):
sys.argv += ["-w", "-p", "0"]
warnings.filterwarnings("error")
try:
self.assertRaises(RuntimeWarning, ftpserver.main)
finally:
warnings.resetwarnings()
# unexpected argument
sys.argv = self.SYSARGV[:]
sys.argv += ["-w foo"]
sys.stderr = self.devnull
self.assertRaises(SystemExit, ftpserver.main)
def test_d_option(self):
sys.argv += ["-d", TESTFN, "-p", "0"]
if not os.path.isdir(TESTFN):
os.mkdir(TESTFN)
ftpserver.main()
# without argument
sys.argv = self.SYSARGV[:]
sys.argv += ["-d"]
sys.stderr = self.devnull
self.assertRaises(SystemExit, ftpserver.main)
# no such directory
sys.argv = self.SYSARGV[:]
sys.argv += ["-d %s" % TESTFN]
safe_rmdir(TESTFN)
self.assertRaises(ValueError, ftpserver.main)
def test_r_option(self):
sys.argv += ["-r 60000-61000", "-p", "0"]
ftpserver.main()
# without arg
sys.argv = self.SYSARGV[:]
sys.argv += ["-r"]
sys.stderr = self.devnull
self.assertRaises(SystemExit, ftpserver.main)
# wrong arg
sys.argv = self.SYSARGV[:]
sys.argv += ["-r yyy-zzz"]
self.assertRaises(SystemExit, ftpserver.main)
def test_v_option(self):
sys.argv += ["-v"]
self.assertRaises(SystemExit, ftpserver.main)
# unexpected argument
sys.argv = self.SYSARGV[:]
sys.argv += ["-v foo"]
sys.stderr = self.devnull
self.assertRaises(SystemExit, ftpserver.main)
def test_main(tests=None):
test_suite = unittest.TestSuite()
if tests is None:
tests = [
TestAbstractedFS,
TestDummyAuthorizer,
TestCallLater,
TestCallEvery,
TestFtpAuthentication,
TestFtpDummyCmds,
TestFtpCmdsSemantic,
TestFtpFsOperations,
TestFtpStoreData,
TestFtpRetrieveData,
TestFtpListingCmds,
TestFtpAbort,
TestTimeouts,
TestConfigurableOptions,
TestCallbacks,
TestCornerCases,
TestUnicodePathNames,
TestCommandLineParser,
]
if SUPPORTS_IPV4:
tests.append(TestIPv4Environment)
if SUPPORTS_IPV6:
tests.append(TestIPv6Environment)
if SUPPORTS_HYBRID_IPV6:
tests.append(TestIPv6MixedEnvironment)
if SUPPORTS_SENDFILE:
tests.append(TestFtpRetrieveDataNoSendfile)
tests.append(TestFtpStoreDataNoSendfile)
else:
if os.name == 'posix':
atexit.register(warnings.warn, "couldn't run sendfile() tests",
RuntimeWarning)
for test in tests:
test_suite.addTest(unittest.makeSuite(test))
try:
unittest.TextTestRunner(verbosity=2).run(test_suite)
except:
# in case of KeyboardInterrupt grant that the threaded FTP
# server running in background gets stopped
asyncore.socket_map.clear()
raise
if __name__ == '__main__':
test_main()
|
{
"content_hash": "696f1b1acae5e158d70334630dbb005c",
"timestamp": "",
"source": "github",
"line_count": 3159,
"max_line_length": 90,
"avg_line_length": 37.74770496992719,
"alnum_prop": 0.5802339720743008,
"repo_name": "leighpauls/k2cro4",
"id": "1ec6db6cad83319c90acdc0dc670d84c51ece99b",
"size": "121389",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/pyftpdlib/src/test/test_ftpd.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ASP",
"bytes": "3062"
},
{
"name": "AppleScript",
"bytes": "25392"
},
{
"name": "Arduino",
"bytes": "464"
},
{
"name": "Assembly",
"bytes": "68131038"
},
{
"name": "C",
"bytes": "242794338"
},
{
"name": "C#",
"bytes": "11024"
},
{
"name": "C++",
"bytes": "353525184"
},
{
"name": "Common Lisp",
"bytes": "3721"
},
{
"name": "D",
"bytes": "1931"
},
{
"name": "Emacs Lisp",
"bytes": "1639"
},
{
"name": "F#",
"bytes": "4992"
},
{
"name": "FORTRAN",
"bytes": "10404"
},
{
"name": "Java",
"bytes": "3845159"
},
{
"name": "JavaScript",
"bytes": "39146656"
},
{
"name": "Lua",
"bytes": "13768"
},
{
"name": "Matlab",
"bytes": "22373"
},
{
"name": "Objective-C",
"bytes": "21887598"
},
{
"name": "PHP",
"bytes": "2344144"
},
{
"name": "Perl",
"bytes": "49033099"
},
{
"name": "Prolog",
"bytes": "2926122"
},
{
"name": "Python",
"bytes": "39863959"
},
{
"name": "R",
"bytes": "262"
},
{
"name": "Racket",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "304063"
},
{
"name": "Scheme",
"bytes": "14853"
},
{
"name": "Shell",
"bytes": "9195117"
},
{
"name": "Tcl",
"bytes": "1919771"
},
{
"name": "Verilog",
"bytes": "3092"
},
{
"name": "Visual Basic",
"bytes": "1430"
},
{
"name": "eC",
"bytes": "5079"
}
],
"symlink_target": ""
}
|
import unittest
from siapy import Sia
class SiaTest(unittest.TestCase):
def test_get_version(self):
sia = Sia()
self.assertEqual(sia.get_version(), '1.0.4')
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "7bfe570fb5ed704006a3192f09efc9f8",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 52,
"avg_line_length": 22.6,
"alnum_prop": 0.6194690265486725,
"repo_name": "lolsteve/siapy",
"id": "4b854834c5db25f9237998269151233369ec9d54",
"size": "226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sia_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13228"
}
],
"symlink_target": ""
}
|
"""Agent action implementations"""
import logging
import six
from cliff import command
from cliff import lister
from cliff import show
from openstackclient.common import utils
class CreateAgent(show.ShowOne):
"""Create agent command"""
log = logging.getLogger(__name__ + ".CreateAgent")
def get_parser(self, prog_name):
parser = super(CreateAgent, self).get_parser(prog_name)
parser.add_argument(
"os",
metavar="<os>",
help="Type of OS")
parser.add_argument(
"architecture",
metavar="<architecture>",
help="Type of architecture")
parser.add_argument(
"version",
metavar="<version>",
help="Version")
parser.add_argument(
"url",
metavar="<url>",
help="URL")
parser.add_argument(
"md5hash",
metavar="<md5hash>",
help="MD5 hash")
parser.add_argument(
"hypervisor",
metavar="<hypervisor>",
help="Type of hypervisor",
default="xen")
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
compute_client = self.app.client_manager.compute
args = (
parsed_args.os,
parsed_args.architecture,
parsed_args.version,
parsed_args.url,
parsed_args.md5hash,
parsed_args.hypervisor
)
agent = compute_client.agents.create(*args)._info.copy()
return zip(*sorted(six.iteritems(agent)))
class DeleteAgent(command.Command):
"""Delete agent command"""
log = logging.getLogger(__name__ + ".DeleteAgent")
def get_parser(self, prog_name):
parser = super(DeleteAgent, self).get_parser(prog_name)
parser.add_argument(
"id",
metavar="<id>",
help="ID of agent to delete")
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
compute_client = self.app.client_manager.compute
compute_client.agents.delete(parsed_args.id)
return
class ListAgent(lister.Lister):
"""List agent command"""
log = logging.getLogger(__name__ + ".ListAgent")
def get_parser(self, prog_name):
parser = super(ListAgent, self).get_parser(prog_name)
parser.add_argument(
"--hypervisor",
metavar="<hypervisor>",
help="Type of hypervisor")
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
compute_client = self.app.client_manager.compute
columns = (
"Agent ID",
"Hypervisor",
"OS",
"Architecture",
"Version",
"Md5Hash",
"URL"
)
data = compute_client.agents.list(parsed_args.hypervisor)
return (columns,
(utils.get_item_properties(
s, columns,
) for s in data))
class SetAgent(show.ShowOne):
"""Set agent command"""
log = logging.getLogger(__name__ + ".SetAgent")
def get_parser(self, prog_name):
parser = super(SetAgent, self).get_parser(prog_name)
parser.add_argument(
"id",
metavar="<id>",
help="ID of the agent")
parser.add_argument(
"version",
metavar="<version>",
help="Version of the agent")
parser.add_argument(
"url",
metavar="<url>",
help="URL")
parser.add_argument(
"md5hash",
metavar="<md5hash>",
help="MD5 hash")
return parser
def take_action(self, parsed_args):
self.log.debug("take_action(%s)" % parsed_args)
compute_client = self.app.client_manager.compute
args = (
parsed_args.id,
parsed_args.version,
parsed_args.url,
parsed_args.md5hash
)
agent = compute_client.agents.update(*args)._info.copy()
return zip(*sorted(six.iteritems(agent)))
|
{
"content_hash": "9c89462e8ab921a11644861b7a5c7033",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 65,
"avg_line_length": 28.536912751677853,
"alnum_prop": 0.5402163687676388,
"repo_name": "derekchiang/python-openstackclient",
"id": "c8fb6ccc0dd5afc9a18b8218f04c18407df5e264",
"size": "4860",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "openstackclient/compute/v2/agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "676848"
},
{
"name": "Shell",
"bytes": "1128"
}
],
"symlink_target": ""
}
|
"""UI Functions"""
import ctypes
import logging
import platform
from enum import Enum
from pathlib import Path
import qdarkstyle
from PyQt5 import QtCore, QtGui, QtWidgets
from config import configurations
from config.constants import ICON_FILE
logger = logging.getLogger(__name__)
def set_window_icon(widget: QtWidgets.QWidget, icon=None):
"""Set PyQt Window Icon
Parameters
----------
widget : QtWidgets.QWidget
icon : str or None
Path to icon file. If None, use default icon (file.png)
"""
if not icon:
icon = Path.cwd() / 'ui' / 'icons' / ICON_FILE
widget.setWindowIcon(QtGui.QIcon(str(icon)))
def set_font_scale(widget: QtWidgets.QWidget, size=None, scale=1.0):
"""Set font scale
Adjust font scaling (and optionally size) for HiDPI display (e.g. macOS devices).
Notes
-----
This can also override the default font size to arbitrary values although the default
values are good enough on non HiDPI display (e.g. Windows 7).
Parameters
----------
widget : QtWidgets.QWidget
size : int
Set the default font size. If None, use FontSize value from TOML settings.
scale : float
The scale multiplier to resize the font size.
"""
font = QtGui.QFont()
if not size:
size = configurations.get_setting('UI', 'FontSize')
system = platform.system()
# TODO: Get access to macOS device with Retina display
if system == 'Darwin':
scale = 1.0
elif system == 'Linux':
scale = 1.0
font.setPointSize(size * scale)
widget.setFont(font)
def center_screen(widget: QtWidgets.QWidget):
"""Center PyQt Window on screen"""
resolution = QtWidgets.QDesktopWidget().screenGeometry()
widget.move((resolution.width() / 2) - (widget.frameSize().width() / 2),
(resolution.height() / 2) - (widget.frameSize().height() / 2))
def always_on_top(widget: QtWidgets.QWidget):
"""Toggle AlwaysOnTop (works in Windows and Linux)"""
widget.setWindowFlags(widget.windowFlags() & ~QtCore.Qt.WindowStaysOnTopHint)
checked = widget.actionAlwaysOnTop.isChecked()
if checked:
widget.setWindowFlags(widget.windowFlags() | QtCore.Qt.WindowStaysOnTopHint)
logger.debug('Always on Top Enabled' if checked else 'Always on Top Disabled')
widget.show()
def taskbar_icon():
"""Workaround to show setWindowIcon on Win7 taskbar instead of default Python icon"""
if platform.system() == 'Windows':
app_id = u'taukeke.python.assetsbrowser' # arbitrary string
ctypes.windll.shell32.SetCurrentProcessExplicitAppUserModelID(app_id)
def enable_hidpi(app):
"""Enable HiDPI support for QApplication.
Parameters
----------
app : QtWidgets.QApplication
"""
app.setAttribute(QtCore.Qt.AA_EnableHighDpiScaling, True)
app.setAttribute(QtCore.Qt.AA_UseHighDpiPixmaps, True)
logger.info('High DPI Scaling and Pixmaps Enabled')
def theme_loader(app):
"""Theme loader for QApplication.
Parameters
----------
app : QtWidgets.QApplication
"""
theme = configurations.get_setting("UI", "Theme")
if theme == "LIGHT":
app.setStyle('Fusion')
if theme == "DARK":
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
def generate_stylesheet(font=None, size=12):
"""Generate/update stylesheet for QApplication
Handle font-family and font-size for users working with different
language like Japanese, Traditional/Simplified Chinese, etc
Parameters
----------
font : str or None
Font name. If None, use system default font
size : int
Font size
"""
font_db = QtGui.QFontDatabase()
if not font or font == 'sans-serif':
font = font_db.systemFont(font_db.GeneralFont).family()
elif font == 'monospace':
font = font_db.systemFont(font_db.FixedFont).family()
css = (
"QWidget { "
f"font-family: '{font}'; "
f"font-size: {size}px; "
"}"
)
css_path = Path(__file__).parent / 'stylesheet.css'
with open(css_path, 'w') as f:
f.write(css)
def checked_radio(enum: Enum, radios: dict):
"""Checked radio button
Retrieve the QRadioButton to be check
Parameters
----------
enum : Enum
radios : dict
Dict mapping must have the following key value pairs
- Key: Enum name
- Value: QRadioButton
"""
radio = radios.get(enum.name)
radio.setChecked(True)
|
{
"content_hash": "d64cbc8a2f6db0db50501b1c2bdd59b9",
"timestamp": "",
"source": "github",
"line_count": 164,
"max_line_length": 89,
"avg_line_length": 27.51219512195122,
"alnum_prop": 0.6496010638297872,
"repo_name": "hueyyeng/AssetsBrowser",
"id": "8fa0c15894665b30002e1d86168a0548b79dd5ec",
"size": "4512",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "ui/functions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4624"
},
{
"name": "Python",
"bytes": "109083"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Competition.payment_option'
db.add_column('competition_competition', 'payment_option',
self.gf('django.db.models.fields.CharField')(default='T', max_length=1),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Competition.payment_option'
db.delete_column('competition_competition', 'payment_option')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'competition.avatar': {
'Meta': {'object_name': 'Avatar'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'image_height': ('django.db.models.fields.IntegerField', [], {}),
'image_width': ('django.db.models.fields.IntegerField', [], {}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'thumbnail_height': ('django.db.models.fields.IntegerField', [], {}),
'thumbnail_width': ('django.db.models.fields.IntegerField', [], {})
},
'competition.competition': {
'Meta': {'ordering': "['-is_running', '-is_open', '-start_time']", 'object_name': 'Competition'},
'avatar': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['competition.Avatar']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'cost': ('django.db.models.fields.FloatField', [], {}),
'description': ('django.db.models.fields.TextField', [], {}),
'end_time': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_open': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'max_num_team_members': ('django.db.models.fields.IntegerField', [], {}),
'min_num_team_members': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'payment_option': ('django.db.models.fields.CharField', [], {'default': "'T'", 'max_length': '1'}),
'questions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['competition.RegistrationQuestion']", 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50', 'blank': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {})
},
'competition.game': {
'Meta': {'object_name': 'Game'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Competition']"}),
'end_time': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.DateTimeField', [], {})
},
'competition.invitation': {
'Meta': {'ordering': "['-sent']", 'object_name': 'Invitation'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'receiver': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'received_invitations'", 'to': "orm['auth.User']"}),
'response': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'sender': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'sent_invitations'", 'to': "orm['auth.User']"}),
'sent': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Team']"})
},
'competition.organizer': {
'Meta': {'object_name': 'Organizer'},
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Competition']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['competition.OrganizerRole']", 'symmetrical': 'False'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'competition.organizerrole': {
'Meta': {'object_name': 'OrganizerRole'},
'description': ('django.db.models.fields.TextField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'competition.registration': {
'Meta': {'object_name': 'Registration'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Competition']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'signup_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
},
'competition.registrationquestion': {
'Meta': {'object_name': 'RegistrationQuestion'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.TextField', [], {}),
'question_type': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
'competition.registrationquestionchoice': {
'Meta': {'object_name': 'RegistrationQuestionChoice'},
'choice': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'question_choice_set'", 'to': "orm['competition.RegistrationQuestion']"})
},
'competition.registrationquestionresponse': {
'Meta': {'object_name': 'RegistrationQuestionResponse'},
'agreed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'choices': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'response_set'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['competition.RegistrationQuestionChoice']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'response_set'", 'to': "orm['competition.RegistrationQuestion']"}),
'registration': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'response_set'", 'to': "orm['competition.Registration']"}),
'text_response': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'competition.score': {
'Meta': {'object_name': 'Score'},
'game': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Game']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'score': ('django.db.models.fields.PositiveIntegerField', [], {}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Team']"})
},
'competition.team': {
'Meta': {'ordering': "['name']", 'unique_together': "(('competition', 'slug'),)", 'object_name': 'Team'},
'avatar': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['competition.Avatar']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'competition': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['competition.Competition']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'eligible_to_win': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'paid': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'blank': 'True'}),
'time_paid': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
complete_apps = ['competition']
|
{
"content_hash": "d45f8c4854cf1015eec89fc6dda3c455",
"timestamp": "",
"source": "github",
"line_count": 168,
"max_line_length": 227,
"avg_line_length": 74.77380952380952,
"alnum_prop": 0.5570768985830282,
"repo_name": "michaelwisely/django-competition",
"id": "0db7df94aadfd5bae153680bc36a80ad30d459ab",
"size": "12586",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/competition/migrations/0004_auto__add_field_competition_payment_option.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3975"
},
{
"name": "Cucumber",
"bytes": "1592"
},
{
"name": "HTML",
"bytes": "41099"
},
{
"name": "JavaScript",
"bytes": "2111"
},
{
"name": "Makefile",
"bytes": "782"
},
{
"name": "Python",
"bytes": "290534"
}
],
"symlink_target": ""
}
|
import argparse
import logging
import math
import os
import random
from pathlib import Path
from typing import Optional
import numpy as np
import torch
import torch.utils.checkpoint
from torch.utils.data import Dataset
import jax
import jax.numpy as jnp
import optax
import PIL
import transformers
from diffusers import (
FlaxAutoencoderKL,
FlaxDDPMScheduler,
FlaxPNDMScheduler,
FlaxStableDiffusionPipeline,
FlaxUNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import FlaxStableDiffusionSafetyChecker
from flax import jax_utils
from flax.training import train_state
from flax.training.common_utils import shard
from huggingface_hub import HfFolder, Repository, whoami
# TODO: remove and import from diffusers.utils when the new version of diffusers is released
from packaging import version
from PIL import Image
from torchvision import transforms
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTokenizer, FlaxCLIPTextModel, set_seed
if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
PIL_INTERPOLATION = {
"linear": PIL.Image.Resampling.BILINEAR,
"bilinear": PIL.Image.Resampling.BILINEAR,
"bicubic": PIL.Image.Resampling.BICUBIC,
"lanczos": PIL.Image.Resampling.LANCZOS,
"nearest": PIL.Image.Resampling.NEAREST,
}
else:
PIL_INTERPOLATION = {
"linear": PIL.Image.LINEAR,
"bilinear": PIL.Image.BILINEAR,
"bicubic": PIL.Image.BICUBIC,
"lanczos": PIL.Image.LANCZOS,
"nearest": PIL.Image.NEAREST,
}
# ------------------------------------------------------------------------------
logger = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser(description="Simple example of a training script.")
parser.add_argument(
"--pretrained_model_name_or_path",
type=str,
default=None,
required=True,
help="Path to pretrained model or model identifier from huggingface.co/models.",
)
parser.add_argument(
"--tokenizer_name",
type=str,
default=None,
help="Pretrained tokenizer name or path if not the same as model_name",
)
parser.add_argument(
"--train_data_dir", type=str, default=None, required=True, help="A folder containing the training data."
)
parser.add_argument(
"--placeholder_token",
type=str,
default=None,
required=True,
help="A token to use as a placeholder for the concept.",
)
parser.add_argument(
"--initializer_token", type=str, default=None, required=True, help="A token to use as initializer word."
)
parser.add_argument("--learnable_property", type=str, default="object", help="Choose between 'object' and 'style'")
parser.add_argument("--repeats", type=int, default=100, help="How many times to repeat the training data.")
parser.add_argument(
"--output_dir",
type=str,
default="text-inversion-model",
help="The output directory where the model predictions and checkpoints will be written.",
)
parser.add_argument("--seed", type=int, default=42, help="A seed for reproducible training.")
parser.add_argument(
"--resolution",
type=int,
default=512,
help=(
"The resolution for input images, all the images in the train/validation dataset will be resized to this"
" resolution"
),
)
parser.add_argument(
"--center_crop", action="store_true", help="Whether to center crop images before resizing to resolution"
)
parser.add_argument(
"--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
)
parser.add_argument("--num_train_epochs", type=int, default=100)
parser.add_argument(
"--max_train_steps",
type=int,
default=5000,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
)
parser.add_argument(
"--learning_rate",
type=float,
default=1e-4,
help="Initial learning rate (after the potential warmup period) to use.",
)
parser.add_argument(
"--scale_lr",
action="store_true",
default=True,
help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
)
parser.add_argument(
"--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
)
parser.add_argument(
"--lr_scheduler",
type=str,
default="constant",
help=(
'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
' "constant", "constant_with_warmup"]'
),
)
parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
parser.add_argument(
"--use_auth_token",
action="store_true",
help=(
"Will use the token generated when running `huggingface-cli login` (necessary to use this script with"
" private models)."
),
)
parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
parser.add_argument(
"--hub_model_id",
type=str,
default=None,
help="The name of the repository to keep in sync with the local `output_dir`.",
)
parser.add_argument(
"--logging_dir",
type=str,
default="logs",
help=(
"[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
" *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
),
)
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
args = parser.parse_args()
env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
if env_local_rank != -1 and env_local_rank != args.local_rank:
args.local_rank = env_local_rank
if args.train_data_dir is None:
raise ValueError("You must specify a train data directory.")
return args
imagenet_templates_small = [
"a photo of a {}",
"a rendering of a {}",
"a cropped photo of the {}",
"the photo of a {}",
"a photo of a clean {}",
"a photo of a dirty {}",
"a dark photo of the {}",
"a photo of my {}",
"a photo of the cool {}",
"a close-up photo of a {}",
"a bright photo of the {}",
"a cropped photo of a {}",
"a photo of the {}",
"a good photo of the {}",
"a photo of one {}",
"a close-up photo of the {}",
"a rendition of the {}",
"a photo of the clean {}",
"a rendition of a {}",
"a photo of a nice {}",
"a good photo of a {}",
"a photo of the nice {}",
"a photo of the small {}",
"a photo of the weird {}",
"a photo of the large {}",
"a photo of a cool {}",
"a photo of a small {}",
]
imagenet_style_templates_small = [
"a painting in the style of {}",
"a rendering in the style of {}",
"a cropped painting in the style of {}",
"the painting in the style of {}",
"a clean painting in the style of {}",
"a dirty painting in the style of {}",
"a dark painting in the style of {}",
"a picture in the style of {}",
"a cool painting in the style of {}",
"a close-up painting in the style of {}",
"a bright painting in the style of {}",
"a cropped painting in the style of {}",
"a good painting in the style of {}",
"a close-up painting in the style of {}",
"a rendition in the style of {}",
"a nice painting in the style of {}",
"a small painting in the style of {}",
"a weird painting in the style of {}",
"a large painting in the style of {}",
]
class TextualInversionDataset(Dataset):
def __init__(
self,
data_root,
tokenizer,
learnable_property="object", # [object, style]
size=512,
repeats=100,
interpolation="bicubic",
flip_p=0.5,
set="train",
placeholder_token="*",
center_crop=False,
):
self.data_root = data_root
self.tokenizer = tokenizer
self.learnable_property = learnable_property
self.size = size
self.placeholder_token = placeholder_token
self.center_crop = center_crop
self.flip_p = flip_p
self.image_paths = [os.path.join(self.data_root, file_path) for file_path in os.listdir(self.data_root)]
self.num_images = len(self.image_paths)
self._length = self.num_images
if set == "train":
self._length = self.num_images * repeats
self.interpolation = {
"linear": PIL_INTERPOLATION["linear"],
"bilinear": PIL_INTERPOLATION["bilinear"],
"bicubic": PIL_INTERPOLATION["bicubic"],
"lanczos": PIL_INTERPOLATION["lanczos"],
}[interpolation]
self.templates = imagenet_style_templates_small if learnable_property == "style" else imagenet_templates_small
self.flip_transform = transforms.RandomHorizontalFlip(p=self.flip_p)
def __len__(self):
return self._length
def __getitem__(self, i):
example = {}
image = Image.open(self.image_paths[i % self.num_images])
if not image.mode == "RGB":
image = image.convert("RGB")
placeholder_string = self.placeholder_token
text = random.choice(self.templates).format(placeholder_string)
example["input_ids"] = self.tokenizer(
text,
padding="max_length",
truncation=True,
max_length=self.tokenizer.model_max_length,
return_tensors="pt",
).input_ids[0]
# default to score-sde preprocessing
img = np.array(image).astype(np.uint8)
if self.center_crop:
crop = min(img.shape[0], img.shape[1])
h, w, = (
img.shape[0],
img.shape[1],
)
img = img[(h - crop) // 2 : (h + crop) // 2, (w - crop) // 2 : (w + crop) // 2]
image = Image.fromarray(img)
image = image.resize((self.size, self.size), resample=self.interpolation)
image = self.flip_transform(image)
image = np.array(image).astype(np.uint8)
image = (image / 127.5 - 1.0).astype(np.float32)
example["pixel_values"] = torch.from_numpy(image).permute(2, 0, 1)
return example
def get_full_repo_name(model_id: str, organization: Optional[str] = None, token: Optional[str] = None):
if token is None:
token = HfFolder.get_token()
if organization is None:
username = whoami(token)["name"]
return f"{username}/{model_id}"
else:
return f"{organization}/{model_id}"
def resize_token_embeddings(model, new_num_tokens, initializer_token_id, placeholder_token_id, rng):
if model.config.vocab_size == new_num_tokens or new_num_tokens is None:
return
model.config.vocab_size = new_num_tokens
params = model.params
old_embeddings = params["text_model"]["embeddings"]["token_embedding"]["embedding"]
old_num_tokens, emb_dim = old_embeddings.shape
initializer = jax.nn.initializers.normal()
new_embeddings = initializer(rng, (new_num_tokens, emb_dim))
new_embeddings = new_embeddings.at[:old_num_tokens].set(old_embeddings)
new_embeddings = new_embeddings.at[placeholder_token_id].set(new_embeddings[initializer_token_id])
params["text_model"]["embeddings"]["token_embedding"]["embedding"] = new_embeddings
model.params = params
return model
def get_params_to_save(params):
return jax.device_get(jax.tree_util.tree_map(lambda x: x[0], params))
def main():
args = parse_args()
if args.seed is not None:
set_seed(args.seed)
if jax.process_index() == 0:
if args.push_to_hub:
if args.hub_model_id is None:
repo_name = get_full_repo_name(Path(args.output_dir).name, token=args.hub_token)
else:
repo_name = args.hub_model_id
repo = Repository(args.output_dir, clone_from=repo_name)
with open(os.path.join(args.output_dir, ".gitignore"), "w+") as gitignore:
if "step_*" not in gitignore:
gitignore.write("step_*\n")
if "epoch_*" not in gitignore:
gitignore.write("epoch_*\n")
elif args.output_dir is not None:
os.makedirs(args.output_dir, exist_ok=True)
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
logger.setLevel(logging.INFO if jax.process_index() == 0 else logging.ERROR)
if jax.process_index() == 0:
transformers.utils.logging.set_verbosity_info()
else:
transformers.utils.logging.set_verbosity_error()
# Load the tokenizer and add the placeholder token as a additional special token
if args.tokenizer_name:
tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
elif args.pretrained_model_name_or_path:
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
# Add the placeholder token in tokenizer
num_added_tokens = tokenizer.add_tokens(args.placeholder_token)
if num_added_tokens == 0:
raise ValueError(
f"The tokenizer already contains the token {args.placeholder_token}. Please pass a different"
" `placeholder_token` that is not already in the tokenizer."
)
# Convert the initializer_token, placeholder_token to ids
token_ids = tokenizer.encode(args.initializer_token, add_special_tokens=False)
# Check if initializer_token is a single token or a sequence of tokens
if len(token_ids) > 1:
raise ValueError("The initializer token must be a single token.")
initializer_token_id = token_ids[0]
placeholder_token_id = tokenizer.convert_tokens_to_ids(args.placeholder_token)
# Load models and create wrapper for stable diffusion
text_encoder = FlaxCLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae, vae_params = FlaxAutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet, unet_params = FlaxUNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
# Create sampling rng
rng = jax.random.PRNGKey(args.seed)
rng, _ = jax.random.split(rng)
# Resize the token embeddings as we are adding new special tokens to the tokenizer
text_encoder = resize_token_embeddings(
text_encoder, len(tokenizer), initializer_token_id, placeholder_token_id, rng
)
original_token_embeds = text_encoder.params["text_model"]["embeddings"]["token_embedding"]["embedding"]
train_dataset = TextualInversionDataset(
data_root=args.train_data_dir,
tokenizer=tokenizer,
size=args.resolution,
placeholder_token=args.placeholder_token,
repeats=args.repeats,
learnable_property=args.learnable_property,
center_crop=args.center_crop,
set="train",
)
def collate_fn(examples):
pixel_values = torch.stack([example["pixel_values"] for example in examples])
input_ids = torch.stack([example["input_ids"] for example in examples])
batch = {"pixel_values": pixel_values, "input_ids": input_ids}
batch = {k: v.numpy() for k, v in batch.items()}
return batch
total_train_batch_size = args.train_batch_size * jax.local_device_count()
train_dataloader = torch.utils.data.DataLoader(
train_dataset, batch_size=total_train_batch_size, shuffle=True, drop_last=True, collate_fn=collate_fn
)
# Optimization
if args.scale_lr:
args.learning_rate = args.learning_rate * total_train_batch_size
constant_scheduler = optax.constant_schedule(args.learning_rate)
optimizer = optax.adamw(
learning_rate=constant_scheduler,
b1=args.adam_beta1,
b2=args.adam_beta2,
eps=args.adam_epsilon,
weight_decay=args.adam_weight_decay,
)
def create_mask(params, label_fn):
def _map(params, mask, label_fn):
for k in params:
if label_fn(k):
mask[k] = "token_embedding"
else:
if isinstance(params[k], dict):
mask[k] = {}
_map(params[k], mask[k], label_fn)
else:
mask[k] = "zero"
mask = {}
_map(params, mask, label_fn)
return mask
def zero_grads():
# from https://github.com/deepmind/optax/issues/159#issuecomment-896459491
def init_fn(_):
return ()
def update_fn(updates, state, params=None):
return jax.tree_util.tree_map(jnp.zeros_like, updates), ()
return optax.GradientTransformation(init_fn, update_fn)
# Zero out gradients of layers other than the token embedding layer
tx = optax.multi_transform(
{"token_embedding": optimizer, "zero": zero_grads()},
create_mask(text_encoder.params, lambda s: s == "token_embedding"),
)
state = train_state.TrainState.create(apply_fn=text_encoder.__call__, params=text_encoder.params, tx=tx)
noise_scheduler = FlaxDDPMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", num_train_timesteps=1000
)
# Initialize our training
train_rngs = jax.random.split(rng, jax.local_device_count())
# Define gradient train step fn
def train_step(state, vae_params, unet_params, batch, train_rng):
dropout_rng, sample_rng, new_train_rng = jax.random.split(train_rng, 3)
def compute_loss(params):
vae_outputs = vae.apply(
{"params": vae_params}, batch["pixel_values"], deterministic=True, method=vae.encode
)
latents = vae_outputs.latent_dist.sample(sample_rng)
# (NHWC) -> (NCHW)
latents = jnp.transpose(latents, (0, 3, 1, 2))
latents = latents * 0.18215
noise_rng, timestep_rng = jax.random.split(sample_rng)
noise = jax.random.normal(noise_rng, latents.shape)
bsz = latents.shape[0]
timesteps = jax.random.randint(
timestep_rng,
(bsz,),
0,
noise_scheduler.config.num_train_timesteps,
)
noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
encoder_hidden_states = state.apply_fn(
batch["input_ids"], params=params, dropout_rng=dropout_rng, train=True
)[0]
unet_outputs = unet.apply(
{"params": unet_params}, noisy_latents, timesteps, encoder_hidden_states, train=False
)
noise_pred = unet_outputs.sample
loss = (noise - noise_pred) ** 2
loss = loss.mean()
return loss
grad_fn = jax.value_and_grad(compute_loss)
loss, grad = grad_fn(state.params)
grad = jax.lax.pmean(grad, "batch")
new_state = state.apply_gradients(grads=grad)
# Keep the token embeddings fixed except the newly added embeddings for the concept,
# as we only want to optimize the concept embeddings
token_embeds = original_token_embeds.at[placeholder_token_id].set(
new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"][placeholder_token_id]
)
new_state.params["text_model"]["embeddings"]["token_embedding"]["embedding"] = token_embeds
metrics = {"loss": loss}
metrics = jax.lax.pmean(metrics, axis_name="batch")
return new_state, metrics, new_train_rng
# Create parallel version of the train and eval step
p_train_step = jax.pmap(train_step, "batch", donate_argnums=(0,))
# Replicate the train state on each device
state = jax_utils.replicate(state)
vae_params = jax_utils.replicate(vae_params)
unet_params = jax_utils.replicate(unet_params)
# Train!
num_update_steps_per_epoch = math.ceil(len(train_dataloader))
# Scheduler and math around the number of training steps.
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
logger.info("***** Running training *****")
logger.info(f" Num examples = {len(train_dataset)}")
logger.info(f" Num Epochs = {args.num_train_epochs}")
logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
logger.info(f" Total train batch size (w. parallel & distributed) = {total_train_batch_size}")
logger.info(f" Total optimization steps = {args.max_train_steps}")
global_step = 0
epochs = tqdm(range(args.num_train_epochs), desc=f"Epoch ... (1/{args.num_train_epochs})", position=0)
for epoch in epochs:
# ======================== Training ================================
train_metrics = []
steps_per_epoch = len(train_dataset) // total_train_batch_size
train_step_progress_bar = tqdm(total=steps_per_epoch, desc="Training...", position=1, leave=False)
# train
for batch in train_dataloader:
batch = shard(batch)
state, train_metric, train_rngs = p_train_step(state, vae_params, unet_params, batch, train_rngs)
train_metrics.append(train_metric)
train_step_progress_bar.update(1)
global_step += 1
if global_step >= args.max_train_steps:
break
train_metric = jax_utils.unreplicate(train_metric)
train_step_progress_bar.close()
epochs.write(f"Epoch... ({epoch + 1}/{args.num_train_epochs} | Loss: {train_metric['loss']})")
# Create the pipeline using using the trained modules and save it.
if jax.process_index() == 0:
scheduler = FlaxPNDMScheduler(
beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", skip_prk_steps=True
)
safety_checker = FlaxStableDiffusionSafetyChecker.from_pretrained(
"CompVis/stable-diffusion-safety-checker", from_pt=True
)
pipeline = FlaxStableDiffusionPipeline(
text_encoder=text_encoder,
vae=vae,
unet=unet,
tokenizer=tokenizer,
scheduler=scheduler,
safety_checker=safety_checker,
feature_extractor=CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32"),
)
pipeline.save_pretrained(
args.output_dir,
params={
"text_encoder": get_params_to_save(state.params),
"vae": get_params_to_save(vae_params),
"unet": get_params_to_save(unet_params),
"safety_checker": safety_checker.params,
},
)
# Also save the newly trained embeddings
learned_embeds = get_params_to_save(state.params)["text_model"]["embeddings"]["token_embedding"]["embedding"][
placeholder_token_id
]
learned_embeds_dict = {args.placeholder_token: learned_embeds}
jnp.save(os.path.join(args.output_dir, "learned_embeds.npy"), learned_embeds_dict)
if args.push_to_hub:
repo.push_to_hub(commit_message="End of training", blocking=False, auto_lfs_prune=True)
if __name__ == "__main__":
main()
|
{
"content_hash": "3dbbf6a7981c42529714895d197a9035",
"timestamp": "",
"source": "github",
"line_count": 650,
"max_line_length": 119,
"avg_line_length": 37.74153846153846,
"alnum_prop": 0.6191504973096363,
"repo_name": "huggingface/diffusers",
"id": "6406be8ad698fb2c9684c6dcbeed67dd865b323c",
"size": "24532",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "examples/textual_inversion/textual_inversion_flax.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7145"
},
{
"name": "Makefile",
"bytes": "2769"
},
{
"name": "Python",
"bytes": "2481515"
}
],
"symlink_target": ""
}
|
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass into py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
sys.path.insert(0, os.path.dirname(__file__))
errno = pytest.main(self.pytest_args)
sys.exit(errno)
requirements = [
'requests',
]
test_requirements = [
'pytest>=2.5.0',
'pytest-cov>=1.7',
]
setup(
name='maxmind_api',
version='0.2',
packages=['maxmind_api'],
url='https://volsor.com/',
license='Proprietary',
author='Volsor',
author_email='admin@volsor.com',
description='',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: CPython',
],
cmdclass={'test': PyTest},
include_package_data=True,
install_requires=requirements,
tests_require=test_requirements
)
|
{
"content_hash": "8f3bbb1271d26db1e472fce39005607f",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 76,
"avg_line_length": 24.903225806451612,
"alnum_prop": 0.6152849740932642,
"repo_name": "Stranger6667/MaxMindAPI",
"id": "93440b3fce3fb58bd03f207f9b3dc56c46cdf018",
"size": "1582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1282"
},
{
"name": "Python",
"bytes": "2954"
}
],
"symlink_target": ""
}
|
import csv
import json
import re
CRS_RE = re.compile(r'[A-Z]{3}')
class Station(dict):
def __init__(self, code, name) -> None:
self['stationCode'] = code
self['stationName'] = name
def __hash__(self) -> int:
return hash((self['stationCode'], self['stationName']))
def __eq__(self, __o: object) -> bool:
return type(__o) == Station and self['stationCode'] == __o['stationCode'] and self['stationName'] == __o['stationName']
def __repr__(self) -> str:
return '{' + self['stationName'] + ', ' + self['stationCode'] + '}'
stations = set()
for letter in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']:
data = json.load(open(f'tmp/{letter}.json'))
for station in data:
if CRS_RE.match(station[0]) and station[10] != '':
stations.add(Station(station[0], station[1]))
with open('stations.csv', 'w', newline='') as out_file:
writer = csv.DictWriter(out_file, fieldnames=['stationName', 'stationCode'])
writer.writeheader()
sorted_stations = sorted(list(stations), key=lambda s: s['stationName'])
writer.writerows(sorted_stations)
print('done')
|
{
"content_hash": "2d4e31fd31d4ec7cb4d61bd5932cb426",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 145,
"avg_line_length": 33.24324324324324,
"alnum_prop": 0.5585365853658537,
"repo_name": "danielthepope/trntxt",
"id": "cd7feab66ab04c789ab9a17a098021b7d7e01161",
"size": "1253",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "resources/stationFetcher/jsonProcessor.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "291"
},
{
"name": "Dockerfile",
"bytes": "307"
},
{
"name": "HTML",
"bytes": "54"
},
{
"name": "JavaScript",
"bytes": "15607"
},
{
"name": "Makefile",
"bytes": "82"
},
{
"name": "Pug",
"bytes": "6532"
},
{
"name": "Python",
"bytes": "1253"
},
{
"name": "Shell",
"bytes": "251"
},
{
"name": "TypeScript",
"bytes": "31549"
}
],
"symlink_target": ""
}
|
from abc import ABCMeta, abstractmethod
class IUnikernelBackend(object):
"""
Interface that must be implemented by every Unikernel Backend. It contains method stubs used by the REST API
provider and other components.
Redefinition of functions decorated with @asbstractmethod is compulsory.
"""
__metaclass__ = ABCMeta
@abstractmethod
def __init__(
self,
_id
):
self.work_dir = None
self.executor = None
self._id = _id
@abstractmethod
def register(
self,
config: str,
unikernel: str
) -> str:
"""
Initialize directory structure for the unikernel, and register it to the database and scheduler.
:return: Working directory of the unikernel
"""
pass
@abstractmethod
def configure(self):
"""
Configure the unikernel to be built for the specific backend
:return:
"""
pass
@abstractmethod
def compile(self):
"""
Build the unikernel
:return:
"""
pass
@abstractmethod
def optimize(self):
"""
Optimize the unikernel binary/VM by stripping off debug symbols / applying data compression, etc.
:return:
"""
pass
@abstractmethod
def start(self):
"""
Launch/boot the unikernel
:return:
"""
pass
@abstractmethod
def get_status(self):
"""
Get status of the unikernel
:return:
"""
pass
@abstractmethod
def get_log(self):
"""
Get runtime log of the unikernel
:return:
"""
pass
@abstractmethod
def stop(self):
"""
Kill execution of the unikernel
:return:
"""
pass
@abstractmethod
def destroy(self):
"""
Destroy the unikernel, remove all assets, and unregister from database and scheduler.
:return:
"""
pass
|
{
"content_hash": "c1bda0119d5a062d6562dd1a9ef73a62",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 112,
"avg_line_length": 21.416666666666668,
"alnum_prop": 0.5437743190661478,
"repo_name": "onyb/dune",
"id": "ece4bbff8ac330f5ba1d17797e4d8b7f641b317e",
"size": "2056",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "core/backends/IUnikernelBackend.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "19704"
}
],
"symlink_target": ""
}
|
import collections
import json
import os
import stat
import warnings
import click
import six
from requests.auth import HTTPBasicAuth
from requests.exceptions import RequestException
from tower_cli import __version__, exceptions as exc
from tower_cli.api import client
from tower_cli.conf import with_global_options, Parser, settings, _apply_runtime_setting
from tower_cli.utils import secho, supports_oauth
from tower_cli.constants import CUR_API_VERSION
from tower_cli.cli.transfer.common import SEND_ORDER
__all__ = ['version', 'config', 'login', 'logout', 'receive', 'send', 'empty']
@click.command()
@with_global_options
def version():
"""Display full version information."""
# Print out the current version of Tower CLI.
click.echo('Tower CLI %s' % __version__)
# Print out the current API version of the current code base.
click.echo('API %s' % CUR_API_VERSION)
# Attempt to connect to the Ansible Tower server.
# If we succeed, print a version; if not, generate a failure.
try:
r = client.get('/config/')
except RequestException as ex:
raise exc.TowerCLIError('Could not connect to Ansible Tower.\n%s' %
six.text_type(ex))
config = r.json()
license = config.get('license_info', {}).get('license_type', 'open')
if license == 'open':
server_type = 'AWX'
else:
server_type = 'Ansible Tower'
click.echo('%s %s' % (server_type, config['version']))
# Print out Ansible version of server
click.echo('Ansible %s' % config['ansible_version'])
def _echo_setting(key):
"""Echo a setting to the CLI."""
value = getattr(settings, key)
secho('%s: ' % key, fg='magenta', bold=True, nl=False)
secho(
six.text_type(value),
bold=True,
fg='white' if isinstance(value, six.text_type) else 'cyan',
)
# Note: This uses `click.command`, not `tower_cli.utils.decorators.command`,
# because we don't want the "global" options that t.u.d.command adds.
@click.command()
@click.argument('key', required=False)
@click.argument('value', required=False)
@click.option('global_', '--global', is_flag=True,
help='Write this config option to the global configuration. '
'Probably will require sudo.\n'
'Deprecated: Use `--scope=global` instead.')
@click.option('--scope', type=click.Choice(['local', 'user', 'global']),
default='user',
help='The config file to write. '
'"local" writes to a config file in the local '
'directory; "user" writes to the home directory,'
' and "global" to a system-wide directory '
'(probably requires sudo).')
@click.option('--unset', is_flag=True,
help='Remove reference to this configuration option from '
'the config file.')
def config(key=None, value=None, scope='user', global_=False, unset=False):
"""Read or write tower-cli configuration.
`tower config` saves the given setting to the appropriate Tower CLI;
either the user's ~/.tower_cli.cfg file, or the /etc/tower/tower_cli.cfg
file if --global is used.
Writing to /etc/tower/tower_cli.cfg is likely to require heightened
permissions (in other words, sudo).
"""
# If the old-style `global_` option is set, issue a deprecation notice.
if global_:
scope = 'global'
warnings.warn('The `--global` option is deprecated and will be '
'removed. Use `--scope=global` to get the same effect.',
DeprecationWarning)
# If no key was provided, print out the current configuration
# in play.
if not key:
seen = set()
parser_desc = {
'runtime': 'Runtime options.',
'environment': 'Options from environment variables.',
'local': 'Local options (set with `tower-cli config '
'--scope=local`; stored in .tower_cli.cfg of this '
'directory or a parent)',
'user': 'User options (set with `tower-cli config`; stored in '
'~/.tower_cli.cfg).',
'global': 'Global options (set with `tower-cli config '
'--scope=global`, stored in /etc/tower/tower_cli.cfg).',
'defaults': 'Defaults.',
}
# Iterate over each parser (English: location we can get settings from)
# and print any settings that we haven't already seen.
#
# We iterate over settings from highest precedence to lowest, so any
# seen settings are overridden by the version we iterated over already.
click.echo('')
for name, parser in zip(settings._parser_names, settings._parsers):
# Determine if we're going to see any options in this
# parser that get echoed.
will_echo = False
for option in parser.options('general'):
if option in seen:
continue
will_echo = True
# Print a segment header
if will_echo:
secho('# %s' % parser_desc[name], fg='green', bold=True)
# Iterate over each option in the parser and, if we haven't
# already seen an option at higher precedence, print it.
for option in parser.options('general'):
if option in seen:
continue
_echo_setting(option)
seen.add(option)
# Print a nice newline, for formatting.
if will_echo:
click.echo('')
return
# Sanity check: Is this a valid configuration option? If it's not
# a key we recognize, abort.
if not hasattr(settings, key):
raise exc.TowerCLIError('Invalid configuration option "%s".' % key)
# Sanity check: The combination of a value and --unset makes no
# sense.
if value and unset:
raise exc.UsageError('Cannot provide both a value and --unset.')
# If a key was provided but no value was provided, then just
# print the current value for that key.
if key and not value and not unset:
_echo_setting(key)
return
# Okay, so we're *writing* a key. Let's do this.
# First, we need the appropriate file.
filename = os.path.expanduser('~/.tower_cli.cfg')
if scope == 'global':
if not os.path.isdir('/etc/tower/'):
raise exc.TowerCLIError('/etc/tower/ does not exist, and this '
'command cowardly declines to create it.')
filename = '/etc/tower/tower_cli.cfg'
elif scope == 'local':
filename = '.tower_cli.cfg'
# Read in the appropriate config file, write this value, and save
# the result back to the file.
parser = Parser()
parser.add_section('general')
parser.read(filename)
if unset:
parser.remove_option('general', key)
else:
parser.set('general', key, value)
with open(filename, 'w') as config_file:
parser.write(config_file)
# Give rw permissions to user only fix for issue number 48
try:
os.chmod(filename, stat.S_IRUSR | stat.S_IWUSR)
except Exception as e:
warnings.warn(
'Unable to set permissions on {0} - {1} '.format(filename, e),
UserWarning
)
click.echo('Configuration updated successfully.')
# TODO:
# Someday it would be nice to create these for us
# Thus the import reference to transfer.common.SEND_ORDER
@click.command()
@click.argument('username', required=True)
@click.option('--password', required=True, prompt=True, hide_input=True)
@click.option('--client-id', required=False)
@click.option('--client-secret', required=False)
@click.option('--scope', required=False, default='write',
type=click.Choice(['read', 'write']))
@click.option('-v', '--verbose', default=None,
help='Show information about requests being made.', is_flag=True,
required=False, callback=_apply_runtime_setting, is_eager=True)
def login(username, password, scope, client_id, client_secret, verbose):
"""
Retrieves and stores an OAuth2 personal auth token.
"""
if not supports_oauth():
raise exc.TowerCLIError(
'This version of Tower does not support OAuth2.0. Set credentials using tower-cli config.'
)
# Explicitly set a basic auth header for PAT acquisition (so that we don't
# try to auth w/ an existing user+pass or oauth2 token in a config file)
req = collections.namedtuple('req', 'headers')({})
if client_id and client_secret:
HTTPBasicAuth(client_id, client_secret)(req)
req.headers['Content-Type'] = 'application/x-www-form-urlencoded'
r = client.post(
'/o/token/',
data={
"grant_type": "password",
"username": username,
"password": password,
"scope": scope
},
headers=req.headers
)
elif client_id:
req.headers['Content-Type'] = 'application/x-www-form-urlencoded'
r = client.post(
'/o/token/',
data={
"grant_type": "password",
"username": username,
"password": password,
"client_id": client_id,
"scope": scope
},
headers=req.headers
)
else:
HTTPBasicAuth(username, password)(req)
r = client.post(
'/users/{}/personal_tokens/'.format(username),
data={"description": "Tower CLI", "application": None, "scope": scope},
headers=req.headers
)
if r.ok:
result = r.json()
result.pop('summary_fields', None)
result.pop('related', None)
if client_id:
token = result.pop('access_token', None)
else:
token = result.pop('token', None)
if settings.verbose:
# only print the actual token if -v
result['token'] = token
secho(json.dumps(result, indent=1), fg='blue', bold=True)
config.main(['oauth_token', token, '--scope=user'])
@click.command()
def logout():
"""
Removes an OAuth2 personal auth token from config.
"""
if not supports_oauth():
raise exc.TowerCLIError(
'This version of Tower does not support OAuth2.0'
)
config.main(['oauth_token', '--unset', '--scope=user'])
@click.command()
@with_global_options
@click.option('--organization', required=False, multiple=True)
@click.option('--user', required=False, multiple=True)
@click.option('--team', required=False, multiple=True)
@click.option('--credential_type', required=False, multiple=True)
@click.option('--credential', required=False, multiple=True)
@click.option('--notification_template', required=False, multiple=True)
@click.option('--inventory_script', required=False, multiple=True)
@click.option('--inventory', required=False, multiple=True)
@click.option('--project', required=False, multiple=True)
@click.option('--job_template', required=False, multiple=True)
@click.option('--workflow', required=False, multiple=True)
@click.option('--all', is_flag=True)
def receive(organization=None, user=None, team=None, credential_type=None, credential=None,
notification_template=None, inventory_script=None, inventory=None, project=None, job_template=None,
workflow=None, all=None):
"""Export assets from Tower.
'tower receive' exports one or more assets from a Tower instance
For all of the possible assets types the TEXT can either be the assets name
(or username for the case of a user) or the keyword all. Specifying all
will export all of the assets of that type.
"""
from tower_cli.cli.transfer.receive import Receiver
receiver = Receiver()
assets_to_export = {}
for asset_type in SEND_ORDER:
assets_to_export[asset_type] = locals()[asset_type]
receiver.receive(all=all, asset_input=assets_to_export)
@click.command()
@with_global_options
@click.argument('source', required=False, nargs=-1)
@click.option('--prevent', multiple=True, required=False,
help='Prevents import of a specific asset type.\n'
'Multiple prevent options can be passed.\n'
'If an asset type in the prevent list tries to be imported an error will occur')
@click.option('--exclude', multiple=True, required=False, help='Ignore specific asset type.\n'
'Multiple exclude options can be passed.\n'
'If an asset type in the exclude list tries to be imprted it will be ignored without an error')
@click.option('--secret_management', multiple=False, required=False, default='default',
type=click.Choice(['default', 'prompt', 'random']),
help='What to do with secrets for new items.\n'
'default - use "password", "token" or "secret" depending on the field'
'prompt - prompt for the secret to use'
'random - generate a random string for the secret'
)
@click.option('--no-color', is_flag=True,
help="Disable color output"
)
def send(source=None, prevent=None, exclude=None, secret_management='default', no_color=False):
"""Import assets into Tower.
'tower send' imports one or more assets into a Tower instance
The import can take either JSON or YAML.
Data can be sent on stdin (i.e. from tower-cli receive pipe) and/or from files
or directories passed as parameters.
If a directory is specified only files that end in .json, .yaml or .yml will be
imported. Other files will be ignored.
"""
from tower_cli.cli.transfer.send import Sender
sender = Sender(no_color)
sender.send(source, prevent, exclude, secret_management)
@click.command()
@with_global_options
@click.option('--organization', required=False, multiple=True)
@click.option('--user', required=False, multiple=True)
@click.option('--team', required=False, multiple=True)
@click.option('--credential_type', required=False, multiple=True)
@click.option('--credential', required=False, multiple=True)
@click.option('--notification_template', required=False, multiple=True)
@click.option('--inventory_script', required=False, multiple=True)
@click.option('--inventory', required=False, multiple=True)
@click.option('--project', required=False, multiple=True)
@click.option('--job_template', required=False, multiple=True)
@click.option('--workflow', required=False, multiple=True)
@click.option('--all', is_flag=True)
@click.option('--no-color', is_flag=True,
help="Disable color output"
)
def empty(organization=None, user=None, team=None, credential_type=None, credential=None, notification_template=None,
inventory_script=None, inventory=None, project=None, job_template=None, workflow=None,
all=None, no_color=False):
"""Empties assets from Tower.
'tower empty' removes all assets from Tower
"""
# Create an import/export object
from tower_cli.cli.transfer.cleaner import Cleaner
destroyer = Cleaner(no_color)
assets_to_export = {}
for asset_type in SEND_ORDER:
assets_to_export[asset_type] = locals()[asset_type]
destroyer.go_ham(all=all, asset_input=assets_to_export)
|
{
"content_hash": "9b22a4aa938918abe9f9d42a4fcf525a",
"timestamp": "",
"source": "github",
"line_count": 391,
"max_line_length": 117,
"avg_line_length": 39.56265984654731,
"alnum_prop": 0.625185855582132,
"repo_name": "AlanCoding/tower-cli",
"id": "cb96aad231df62804c2fb0e197def57d7c9c5cc4",
"size": "16053",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tower_cli/cli/misc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "2029"
},
{
"name": "Python",
"bytes": "770584"
},
{
"name": "Shell",
"bytes": "1890"
}
],
"symlink_target": ""
}
|
"""
Tests for the orographic-enhancement CLI
"""
import pytest
from improver.constants import LOOSE_TOLERANCE
from . import acceptance as acc
pytestmark = [pytest.mark.acc, acc.skip_if_kgo_missing]
OE = "orographic_enhancement_high_resolution"
CLI = acc.cli_name_with_dashes(__file__)
run_cli = acc.run_cli(CLI)
@pytest.mark.slow
def test_basic(tmp_path):
"""Test basic orographic enhancement"""
kgo_dir = acc.kgo_root() / "orographic_enhancement/basic"
kgo_path = kgo_dir / "kgo_hi_res.nc"
input_args = [
kgo_dir / f"{param}.nc"
for param in (
"temperature",
"humidity",
"pressure",
"wind_speed",
"wind_direction",
"orography_uk-standard_1km",
)
]
output_path = tmp_path / "output.nc"
args = [*input_args, "--output", output_path]
run_cli(args)
acc.compare(output_path, kgo_path, rtol=LOOSE_TOLERANCE)
@pytest.mark.slow
def test_boundary_height(tmp_path):
"""Test orographic enhancement with specified boundary height"""
kgo_dir = acc.kgo_root() / "orographic_enhancement/boundary_height"
kgo_path = kgo_dir / "kgo_hi_res.nc"
input_dir = kgo_dir / "../basic"
input_args = [
input_dir / f"{param}.nc"
for param in (
"temperature",
"humidity",
"pressure",
"wind_speed",
"wind_direction",
"orography_uk-standard_1km",
)
]
output_path = tmp_path / "output.nc"
args = [
*input_args,
"--boundary-height=500.",
"--boundary-height-units=m",
"--output",
output_path,
]
run_cli(args)
acc.compare(output_path, kgo_path, rtol=LOOSE_TOLERANCE)
@pytest.mark.slow
def test_boundary_height_units(tmp_path):
"""Test orographic enhancement with boundary height unit conversion"""
kgo_dir = acc.kgo_root() / "orographic_enhancement/boundary_height"
kgo_path = kgo_dir / "kgo_hi_res.nc"
input_dir = kgo_dir / "../basic"
input_args = [
input_dir / f"{param}.nc"
for param in (
"temperature",
"humidity",
"pressure",
"wind_speed",
"wind_direction",
"orography_uk-standard_1km",
)
]
output_path = tmp_path / "output.nc"
args = [
*input_args,
"--boundary-height=1640.41994751",
"--boundary-height-units=ft",
"--output",
output_path,
]
run_cli(args)
acc.compare(output_path, kgo_path, rtol=LOOSE_TOLERANCE)
def test_invalid_boundary_height(tmp_path):
"""Test excessively high boundary height"""
kgo_dir = acc.kgo_root() / "orographic_enhancement/boundary_height"
input_dir = kgo_dir / "../basic"
input_args = [
input_dir / f"{param}.nc"
for param in (
"temperature",
"humidity",
"pressure",
"wind_speed",
"wind_direction",
"orography_uk-standard_1km",
)
]
output_path = tmp_path / "output.nc"
args = [
*input_args,
"--boundary-height=500000.",
"--boundary-height-units=m",
"--output",
output_path,
]
with pytest.raises(ValueError, match=".*height.*"):
run_cli(args)
|
{
"content_hash": "73ef37eaec244627d83e05da4fa1021c",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 74,
"avg_line_length": 25.844961240310077,
"alnum_prop": 0.5581883623275345,
"repo_name": "fionaRust/improver",
"id": "afa0adc2b942cd7e13d32c63fb8189d2f3909c5b",
"size": "4991",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "improver_tests/acceptance/test_orographic_enhancement.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "5026255"
},
{
"name": "Shell",
"bytes": "9493"
}
],
"symlink_target": ""
}
|
import csv,sys,os
import numpy as np
from networkx import Graph
sys.path.append(os.path.join("..","tests","data"))
## a basic class to handle distance matrices used to describe pairwise relationships between
# genes in a gene set.
class DistanceMatrix:
## Constructor. Makes an object reprensentation of a distance matrix.
# @param convention is to use the species common name i.e. 'yeast', 'mouse' and 'human'.
# @param a csv saved version of a distance matrix (see example in ../tests/data)
def __init__(self,species,csvFile,forceInt=False):
self.species = species
self.csvFile = csvFile
self.forceInt = forceInt
self.load_pairwise_distance_from_csv()
self.get_descriptive_stats()
## loads a dictionary that represents a pairwise distance matrix
# also creates a unique list of the individual genes in the matrix
def load_pairwise_distance_from_csv(self):
reader = csv.reader(open(self.csvFile,'r'))
self.pairwiseDict = {}
header = reader.next()
self.geneNames = set([])
for linja in reader:
geneI = linja[0]
geneJ = linja[1]
dist = float(linja[2])
if dist != 1e8:
key = geneI + "#" + geneJ
if self.forceInt == False:
self.pairwiseDict[key] = dist
else:
self.pairwiseDict[key] = int(dist)
self.geneNames.update([geneI])
self.geneNames = list(self.geneNames)
self.geneNames.sort()
def create_distribution(self):
self.distances = np.array(self.pairwiseDict.values())
## returns the distance for given nodes i and j
def get_dist(self,nodeI,nodeJ):
key1 = nodeI + "#" + nodeJ
key2 = nodeJ + "#" + nodeI
if self.pairwiseDict.has_key(key1) == True:
return self.pairwiseDict[key1]
elif self.pairwiseDict.has_key(key2) == True:
return self.pairwiseDict[key2]
else:
return None
## return a networkx graph given a set of genes
def get_nx_graph(self,geneList):
G = Graph()
for i in range(len(geneList)):
geneI = geneList[i]
for j in range(len(geneList)):
geneJ = geneList[j]
dist = self.get_dist(geneI,geneJ)
if dist != None:
G.add_edge(geneI,geneJ,weight=dist)
return G
## return descriptive stats of the distance matrix
def get_descriptive_stats(self):
allDistances = np.zeros(len(self.pairwiseDict.keys()))
i = 0
for val in self.pairwiseDict.itervalues():
allDistances[i] = val
i+=1
allDistances.sort()
self.stats = {'mean':allDistances.mean(),
'std':allDistances.std(),
'min':allDistances.min(),
'max':allDistances.max(),
'75th':allDistances[int(round(0.75*allDistances.size))]}
#import matplotlib.pyplot as plt
#plt.hist(allDistances)
#plt.show()
|
{
"content_hash": "08dedff2f4f66e4ca44616967f40a9ce",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 93,
"avg_line_length": 33.95744680851064,
"alnum_prop": 0.5673558897243107,
"repo_name": "ksiomelo/cubix",
"id": "516bda72b27d5ad321df30bc6638c30e8ace7c74",
"size": "3192",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "spectralmix/DistanceMatrix.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "42207"
},
{
"name": "HTML",
"bytes": "108713"
},
{
"name": "JavaScript",
"bytes": "1212584"
},
{
"name": "Python",
"bytes": "365304"
}
],
"symlink_target": ""
}
|
"""Handles all requests relating to compute resources (e.g. guest VMs,
networking and storage of VMs, and compute hosts on which they run)."""
import base64
import copy
import functools
import re
import string
import uuid
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from oslo_utils import strutils
from oslo_utils import timeutils
from oslo_utils import units
from oslo_utils import uuidutils
import six
from six.moves import range
from nova import availability_zones
from nova import block_device
from nova.cells import opts as cells_opts
from nova.compute import flavors
from nova.compute import instance_actions
from nova.compute import power_state
from nova.compute import rpcapi as compute_rpcapi
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import crypto
from nova.db import base
from nova import exception
from nova import hooks
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LI
from nova.i18n import _LW
from nova import image
from nova import keymgr
from nova import network
from nova.network import model as network_model
from nova.network.security_group import openstack_driver
from nova.network.security_group import security_group_base
from nova import notifications
from nova import objects
from nova.objects import base as obj_base
from nova.objects import block_device as block_device_obj
from nova.objects import keypair as keypair_obj
from nova.objects import quotas as quotas_obj
from nova.objects import security_group as security_group_obj
from nova.pci import request as pci_request
import nova.policy
from nova import rpc
from nova.scheduler import client as scheduler_client
from nova import servicegroup
from nova import utils
from nova.virt import hardware
from nova import volume
LOG = logging.getLogger(__name__)
get_notifier = functools.partial(rpc.get_notifier, service='compute')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
compute_opts = [
cfg.BoolOpt('allow_resize_to_same_host',
default=False,
help='Allow destination machine to match source for resize. '
'Useful when testing in single-host environments.'),
cfg.StrOpt('default_schedule_zone',
help='Availability zone to use when user doesn\'t specify one'),
cfg.ListOpt('non_inheritable_image_properties',
default=['cache_in_nova',
'bittorrent'],
help='These are image properties which a snapshot should not'
' inherit from an instance'),
cfg.StrOpt('null_kernel',
default='nokernel',
help='Kernel image that indicates not to use a kernel, but to '
'use a raw disk image instead'),
cfg.StrOpt('multi_instance_display_name_template',
default='%(name)s-%(count)d',
help='When creating multiple instances with a single request '
'using the os-multiple-create API extension, this '
'template will be used to build the display name for '
'each instance. The benefit is that the instances '
'end up with different hostnames. To restore legacy '
'behavior of every instance having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
cfg.IntOpt('max_local_block_devices',
default=3,
help='Maximum number of devices that will result '
'in a local image being created on the hypervisor node. '
'Setting this to 0 means nova will allow only '
'boot from volume. A negative number means unlimited.'),
]
ephemeral_storage_encryption_group = cfg.OptGroup(
name='ephemeral_storage_encryption',
title='Ephemeral storage encryption options')
ephemeral_storage_encryption_opts = [
cfg.BoolOpt('enabled',
default=False,
help='Whether to encrypt ephemeral storage'),
cfg.StrOpt('cipher',
default='aes-xts-plain64',
help='The cipher and mode to be used to encrypt ephemeral '
'storage. Which ciphers are available ciphers depends '
'on kernel support. See /proc/crypto for the list of '
'available options.'),
cfg.IntOpt('key_size',
default=512,
help='The bit length of the encryption key to be used to '
'encrypt ephemeral storage (in XTS mode only half of '
'the bits are used for encryption key)')
]
CONF = cfg.CONF
CONF.register_opts(compute_opts)
CONF.register_group(ephemeral_storage_encryption_group)
CONF.register_opts(ephemeral_storage_encryption_opts,
group='ephemeral_storage_encryption')
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
CONF.import_opt('enable', 'nova.cells.opts', group='cells')
CONF.import_opt('default_ephemeral_format', 'nova.virt.driver')
MAX_USERDATA_SIZE = 65535
RO_SECURITY_GROUPS = ['default']
VIDEO_RAM = 'hw_video:ram_max_mb'
AGGREGATE_ACTION_UPDATE = 'Update'
AGGREGATE_ACTION_UPDATE_META = 'UpdateMeta'
AGGREGATE_ACTION_DELETE = 'Delete'
AGGREGATE_ACTION_ADD = 'Add'
def check_instance_state(vm_state=None, task_state=(None,),
must_have_launched=True):
"""Decorator to check VM and/or task state before entry to API functions.
If the instance is in the wrong state, or has not been successfully
started at least once the wrapper will raise an exception.
"""
if vm_state is not None and not isinstance(vm_state, set):
vm_state = set(vm_state)
if task_state is not None and not isinstance(task_state, set):
task_state = set(task_state)
def outer(f):
@functools.wraps(f)
def inner(self, context, instance, *args, **kw):
if vm_state is not None and instance.vm_state not in vm_state:
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance.uuid,
state=instance.vm_state,
method=f.__name__)
if (task_state is not None and
instance.task_state not in task_state):
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance.uuid,
state=instance.task_state,
method=f.__name__)
if must_have_launched and not instance.launched_at:
raise exception.InstanceInvalidState(
attr='launched_at',
instance_uuid=instance.uuid,
state=instance.launched_at,
method=f.__name__)
return f(self, context, instance, *args, **kw)
return inner
return outer
def check_instance_host(function):
@functools.wraps(function)
def wrapped(self, context, instance, *args, **kwargs):
if not instance.host:
raise exception.InstanceNotReady(instance_id=instance.uuid)
return function(self, context, instance, *args, **kwargs)
return wrapped
def check_instance_lock(function):
@functools.wraps(function)
def inner(self, context, instance, *args, **kwargs):
if instance.locked and not context.is_admin:
raise exception.InstanceIsLocked(instance_uuid=instance.uuid)
return function(self, context, instance, *args, **kwargs)
return inner
def policy_decorator(scope):
"""Check corresponding policy prior of wrapped method to execution."""
def outer(func):
@functools.wraps(func)
def wrapped(self, context, target, *args, **kwargs):
if not self.skip_policy_check:
check_policy(context, func.__name__, target, scope)
return func(self, context, target, *args, **kwargs)
return wrapped
return outer
wrap_check_policy = policy_decorator(scope='compute')
wrap_check_security_groups_policy = policy_decorator(
scope='compute:security_groups')
def check_policy(context, action, target, scope='compute'):
_action = '%s:%s' % (scope, action)
nova.policy.enforce(context, _action, target)
def check_instance_cell(fn):
def _wrapped(self, context, instance, *args, **kwargs):
self._validate_cell(instance, fn.__name__)
return fn(self, context, instance, *args, **kwargs)
_wrapped.__name__ = fn.__name__
return _wrapped
def _diff_dict(orig, new):
"""Return a dict describing how to change orig to new. The keys
correspond to values that have changed; the value will be a list
of one or two elements. The first element of the list will be
either '+' or '-', indicating whether the key was updated or
deleted; if the key was updated, the list will contain a second
element, giving the updated value.
"""
# Figure out what keys went away
result = {k: ['-'] for k in set(orig.keys()) - set(new.keys())}
# Compute the updates
for key, value in new.items():
if key not in orig or value != orig[key]:
result[key] = ['+', value]
return result
class API(base.Base):
"""API for interacting with the compute manager."""
def __init__(self, image_api=None, network_api=None, volume_api=None,
security_group_api=None, skip_policy_check=False, **kwargs):
self.skip_policy_check = skip_policy_check
self.image_api = image_api or image.API()
self.network_api = network_api or network.API(
skip_policy_check=skip_policy_check)
self.volume_api = volume_api or volume.API()
self.security_group_api = (security_group_api or
openstack_driver.get_openstack_security_group_driver(
skip_policy_check=skip_policy_check))
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self._compute_task_api = None
self.servicegroup_api = servicegroup.API()
self.notifier = rpc.get_notifier('compute', CONF.host)
if CONF.ephemeral_storage_encryption.enabled:
self.key_manager = keymgr.API()
super(API, self).__init__(**kwargs)
@property
def compute_task_api(self):
if self._compute_task_api is None:
# TODO(alaski): Remove calls into here from conductor manager so
# that this isn't necessary. #1180540
from nova import conductor
self._compute_task_api = conductor.ComputeTaskAPI()
return self._compute_task_api
@property
def cell_type(self):
try:
return getattr(self, '_cell_type')
except AttributeError:
self._cell_type = cells_opts.get_cell_type()
return self._cell_type
def _cell_read_only(self, cell_name):
"""Is the target cell in a read-only mode?"""
# FIXME(comstud): Add support for this.
return False
def _validate_cell(self, instance, method):
if self.cell_type != 'api':
return
cell_name = instance.cell_name
if not cell_name:
raise exception.InstanceUnknownCell(
instance_uuid=instance.uuid)
if self._cell_read_only(cell_name):
raise exception.InstanceInvalidState(
attr="vm_state",
instance_uuid=instance.uuid,
state="temporary_readonly",
method=method)
def _record_action_start(self, context, instance, action):
objects.InstanceAction.action_start(context, instance.uuid,
action, want_result=False)
def _check_injected_file_quota(self, context, injected_files):
"""Enforce quota limits on injected files.
Raises a QuotaError if any limit is exceeded.
"""
if injected_files is None:
return
# Check number of files first
try:
objects.Quotas.limit_check(context,
injected_files=len(injected_files))
except exception.OverQuota:
raise exception.OnsetFileLimitExceeded()
# OK, now count path and content lengths; we're looking for
# the max...
max_path = 0
max_content = 0
for path, content in injected_files:
max_path = max(max_path, len(path))
max_content = max(max_content, len(content))
try:
objects.Quotas.limit_check(context,
injected_file_path_bytes=max_path,
injected_file_content_bytes=max_content)
except exception.OverQuota as exc:
# Favor path limit over content limit for reporting
# purposes
if 'injected_file_path_bytes' in exc.kwargs['overs']:
raise exception.OnsetFilePathLimitExceeded()
else:
raise exception.OnsetFileContentLimitExceeded()
def _get_headroom(self, quotas, usages, deltas):
headroom = {res: quotas[res] -
(usages[res]['in_use'] + usages[res]['reserved'])
for res in quotas.keys()}
# If quota_cores is unlimited [-1]:
# - set cores headroom based on instances headroom:
if quotas.get('cores') == -1:
if deltas.get('cores'):
hc = headroom['instances'] * deltas['cores']
headroom['cores'] = hc / deltas.get('instances', 1)
else:
headroom['cores'] = headroom['instances']
# If quota_ram is unlimited [-1]:
# - set ram headroom based on instances headroom:
if quotas.get('ram') == -1:
if deltas.get('ram'):
hr = headroom['instances'] * deltas['ram']
headroom['ram'] = hr / deltas.get('instances', 1)
else:
headroom['ram'] = headroom['instances']
return headroom
def _check_num_instances_quota(self, context, instance_type, min_count,
max_count, project_id=None, user_id=None):
"""Enforce quota limits on number of instances created."""
# Determine requested cores and ram
req_cores = max_count * instance_type['vcpus']
vram_mb = int(instance_type.get('extra_specs', {}).get(VIDEO_RAM, 0))
req_ram = max_count * (instance_type['memory_mb'] + vram_mb)
# Check the quota
try:
quotas = objects.Quotas(context=context)
quotas.reserve(instances=max_count,
cores=req_cores, ram=req_ram,
project_id=project_id, user_id=user_id)
except exception.OverQuota as exc:
# OK, we exceeded quota; let's figure out why...
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
usages = exc.kwargs['usages']
deltas = {'instances': max_count,
'cores': req_cores, 'ram': req_ram}
headroom = self._get_headroom(quotas, usages, deltas)
allowed = headroom['instances']
# Reduce 'allowed' instances in line with the cores & ram headroom
if instance_type['vcpus']:
allowed = min(allowed,
headroom['cores'] // instance_type['vcpus'])
if instance_type['memory_mb']:
allowed = min(allowed,
headroom['ram'] // (instance_type['memory_mb'] +
vram_mb))
# Convert to the appropriate exception message
if allowed <= 0:
msg = _("Cannot run any more instances of this type.")
elif min_count <= allowed <= max_count:
# We're actually OK, but still need reservations
return self._check_num_instances_quota(context, instance_type,
min_count, allowed)
else:
msg = (_("Can only run %s more instances of this type.") %
allowed)
num_instances = (str(min_count) if min_count == max_count else
"%s-%s" % (min_count, max_count))
requested = dict(instances=num_instances, cores=req_cores,
ram=req_ram)
(overs, reqs, total_alloweds, useds) = self._get_over_quota_detail(
headroom, overs, quotas, requested)
params = {'overs': overs, 'pid': context.project_id,
'min_count': min_count, 'max_count': max_count,
'msg': msg}
if min_count == max_count:
LOG.debug(("%(overs)s quota exceeded for %(pid)s,"
" tried to run %(min_count)d instances. "
"%(msg)s"), params)
else:
LOG.debug(("%(overs)s quota exceeded for %(pid)s,"
" tried to run between %(min_count)d and"
" %(max_count)d instances. %(msg)s"),
params)
raise exception.TooManyInstances(overs=overs,
req=reqs,
used=useds,
allowed=total_alloweds)
return max_count, quotas
def _get_over_quota_detail(self, headroom, overs, quotas, requested):
reqs = []
useds = []
total_alloweds = []
for resource in overs:
reqs.append(str(requested[resource]))
useds.append(str(quotas[resource] - headroom[resource]))
total_alloweds.append(str(quotas[resource]))
(overs, reqs, useds, total_alloweds) = map(', '.join, (
overs, reqs, useds, total_alloweds))
return overs, reqs, total_alloweds, useds
def _check_metadata_properties_quota(self, context, metadata=None):
"""Enforce quota limits on metadata properties."""
if not metadata:
metadata = {}
if not isinstance(metadata, dict):
msg = (_("Metadata type should be dict."))
raise exception.InvalidMetadata(reason=msg)
num_metadata = len(metadata)
try:
objects.Quotas.limit_check(context, metadata_items=num_metadata)
except exception.OverQuota as exc:
quota_metadata = exc.kwargs['quotas']['metadata_items']
raise exception.MetadataLimitExceeded(allowed=quota_metadata)
# Because metadata is stored in the DB, we hard-code the size limits
# In future, we may support more variable length strings, so we act
# as if this is quota-controlled for forwards compatibility
for k, v in six.iteritems(metadata):
try:
utils.check_string_length(v)
utils.check_string_length(k, min_length=1)
except exception.InvalidInput as e:
raise exception.InvalidMetadata(reason=e.format_message())
# For backward compatible we need raise HTTPRequestEntityTooLarge
# so we need to keep InvalidMetadataSize exception here
if len(k) > 255:
msg = _("Metadata property key greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
if len(v) > 255:
msg = _("Metadata property value greater than 255 characters")
raise exception.InvalidMetadataSize(reason=msg)
def _check_requested_secgroups(self, context, secgroups):
"""Check if the security group requested exists and belongs to
the project.
"""
for secgroup in secgroups:
# NOTE(sdague): default is handled special
if secgroup == "default":
continue
if not self.security_group_api.get(context, secgroup):
raise exception.SecurityGroupNotFoundForProject(
project_id=context.project_id, security_group_id=secgroup)
def _check_requested_networks(self, context, requested_networks,
max_count):
"""Check if the networks requested belongs to the project
and the fixed IP address for each network provided is within
same the network block
"""
if requested_networks is not None:
# NOTE(danms): Temporary transition
requested_networks = requested_networks.as_tuples()
return self.network_api.validate_networks(context, requested_networks,
max_count)
def _handle_kernel_and_ramdisk(self, context, kernel_id, ramdisk_id,
image):
"""Choose kernel and ramdisk appropriate for the instance.
The kernel and ramdisk can be chosen in one of three ways:
1. Passed in with create-instance request.
2. Inherited from image.
3. Forced to None by using `null_kernel` FLAG.
"""
# Inherit from image if not specified
image_properties = image.get('properties', {})
if kernel_id is None:
kernel_id = image_properties.get('kernel_id')
if ramdisk_id is None:
ramdisk_id = image_properties.get('ramdisk_id')
# Force to None if using null_kernel
if kernel_id == str(CONF.null_kernel):
kernel_id = None
ramdisk_id = None
# Verify kernel and ramdisk exist (fail-fast)
if kernel_id is not None:
kernel_image = self.image_api.get(context, kernel_id)
# kernel_id could have been a URI, not a UUID, so to keep behaviour
# from before, which leaked that implementation detail out to the
# caller, we return the image UUID of the kernel image and ramdisk
# image (below) and not any image URIs that might have been
# supplied.
# TODO(jaypipes): Get rid of this silliness once we move to a real
# Image object and hide all of that stuff within nova.image.api.
kernel_id = kernel_image['id']
if ramdisk_id is not None:
ramdisk_image = self.image_api.get(context, ramdisk_id)
ramdisk_id = ramdisk_image['id']
return kernel_id, ramdisk_id
@staticmethod
def _handle_availability_zone(context, availability_zone):
# NOTE(vish): We have a legacy hack to allow admins to specify hosts
# via az using az:host:node. It might be nice to expose an
# api to specify specific hosts to force onto, but for
# now it just supports this legacy hack.
# NOTE(deva): It is also possible to specify az::node, in which case
# the host manager will determine the correct host.
forced_host = None
forced_node = None
if availability_zone and ':' in availability_zone:
c = availability_zone.count(':')
if c == 1:
availability_zone, forced_host = availability_zone.split(':')
elif c == 2:
if '::' in availability_zone:
availability_zone, forced_node = \
availability_zone.split('::')
else:
availability_zone, forced_host, forced_node = \
availability_zone.split(':')
else:
raise exception.InvalidInput(
reason="Unable to parse availability_zone")
if not availability_zone:
availability_zone = CONF.default_schedule_zone
return availability_zone, forced_host, forced_node
def _ensure_auto_disk_config_is_valid(self, auto_disk_config_img,
auto_disk_config, image):
auto_disk_config_disabled = \
utils.is_auto_disk_config_disabled(auto_disk_config_img)
if auto_disk_config_disabled and auto_disk_config:
raise exception.AutoDiskConfigDisabledByImage(image=image)
def _inherit_properties_from_image(self, image, auto_disk_config):
image_properties = image.get('properties', {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_properties)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image.get("id"))
if auto_disk_config is None:
auto_disk_config = strutils.bool_from_string(auto_disk_config_img)
return {
'os_type': image_properties.get('os_type'),
'architecture': image_properties.get('architecture'),
'vm_mode': image_properties.get('vm_mode'),
'auto_disk_config': auto_disk_config
}
def _apply_instance_name_template(self, context, instance, index):
params = {
'uuid': instance.uuid,
'name': instance.display_name,
'count': index + 1,
}
try:
new_name = (CONF.multi_instance_display_name_template %
params)
except (KeyError, TypeError):
LOG.exception(_LE('Failed to set instance name using '
'multi_instance_display_name_template.'))
new_name = instance.display_name
instance.display_name = new_name
if not instance.get('hostname', None):
instance.hostname = utils.sanitize_hostname(new_name)
instance.save()
return instance
def _check_config_drive(self, config_drive):
if config_drive:
try:
bool_val = strutils.bool_from_string(config_drive,
strict=True)
except ValueError:
raise exception.ConfigDriveInvalidValue(option=config_drive)
else:
bool_val = False
# FIXME(comstud): Bug ID 1193438 filed for this. This looks silly,
# but this is because the config drive column is a String. False
# is represented by using an empty string. And for whatever
# reason, we rely on the DB to cast True to a String.
return True if bool_val else ''
def _check_requested_image(self, context, image_id, image,
instance_type, root_bdm):
if not image:
return
if image['status'] != 'active':
raise exception.ImageNotActive(image_id=image_id)
image_properties = image.get('properties', {})
config_drive_option = image_properties.get(
'img_config_drive', 'optional')
if config_drive_option not in ['optional', 'mandatory']:
raise exception.InvalidImageConfigDrive(
config_drive=config_drive_option)
if instance_type['memory_mb'] < int(image.get('min_ram') or 0):
raise exception.FlavorMemoryTooSmall()
# Image min_disk is in gb, size is in bytes. For sanity, have them both
# in bytes.
image_min_disk = int(image.get('min_disk') or 0) * units.Gi
image_size = int(image.get('size') or 0)
# Target disk is a volume. Don't check flavor disk size because it
# doesn't make sense, and check min_disk against the volume size.
if (root_bdm is not None and root_bdm.is_volume):
# There are 2 possibilities here: either the target volume already
# exists, or it doesn't, in which case the bdm will contain the
# intended volume size.
#
# Cinder does its own check against min_disk, so if the target
# volume already exists this has already been done and we don't
# need to check it again here. In this case, volume_size may not be
# set on the bdm.
#
# If we're going to create the volume, the bdm will contain
# volume_size. Therefore we should check it if it exists. This will
# still be checked again by cinder when the volume is created, but
# that will not happen until the request reaches a host. By
# checking it here, the user gets an immediate and useful failure
# indication.
#
# The third possibility is that we have failed to consider
# something, and there are actually more than 2 possibilities. In
# this case cinder will still do the check at volume creation time.
# The behaviour will still be correct, but the user will not get an
# immediate failure from the api, and will instead have to
# determine why the instance is in an error state with a task of
# block_device_mapping.
#
# We could reasonably refactor this check into _validate_bdm at
# some future date, as the various size logic is already split out
# in there.
dest_size = root_bdm.volume_size
if dest_size is not None:
dest_size *= units.Gi
if image_min_disk > dest_size:
raise exception.VolumeSmallerThanMinDisk(
volume_size=dest_size, image_min_disk=image_min_disk)
# Target disk is a local disk whose size is taken from the flavor
else:
dest_size = instance_type['root_gb'] * units.Gi
# NOTE(johannes): root_gb is allowed to be 0 for legacy reasons
# since libvirt interpreted the value differently than other
# drivers. A value of 0 means don't check size.
if dest_size != 0:
if image_size > dest_size:
raise exception.FlavorDiskSmallerThanImage(
flavor_size=dest_size, image_size=image_size)
if image_min_disk > dest_size:
raise exception.FlavorDiskSmallerThanMinDisk(
flavor_size=dest_size, image_min_disk=image_min_disk)
def _get_image_defined_bdms(self, base_options, instance_type, image_meta,
root_device_name):
image_properties = image_meta.get('properties', {})
# Get the block device mappings defined by the image.
image_defined_bdms = image_properties.get('block_device_mapping', [])
legacy_image_defined = not image_properties.get('bdm_v2', False)
image_mapping = image_properties.get('mappings', [])
if legacy_image_defined:
image_defined_bdms = block_device.from_legacy_mapping(
image_defined_bdms, None, root_device_name)
else:
image_defined_bdms = map(block_device.BlockDeviceDict,
image_defined_bdms)
if image_mapping:
image_defined_bdms += self._prepare_image_mapping(
instance_type, image_mapping)
return image_defined_bdms
def _get_flavor_defined_bdms(self, instance_type, block_device_mapping):
flavor_defined_bdms = []
have_ephemeral_bdms = any(filter(
block_device.new_format_is_ephemeral, block_device_mapping))
have_swap_bdms = any(filter(
block_device.new_format_is_swap, block_device_mapping))
if instance_type.get('ephemeral_gb') and not have_ephemeral_bdms:
flavor_defined_bdms.append(
block_device.create_blank_bdm(instance_type['ephemeral_gb']))
if instance_type.get('swap') and not have_swap_bdms:
flavor_defined_bdms.append(
block_device.create_blank_bdm(instance_type['swap'], 'swap'))
return flavor_defined_bdms
def _merge_with_image_bdms(self, block_device_mapping, image_mappings):
"""Override any block devices from the image by device name"""
device_names = set(bdm['device_name'] for bdm in block_device_mapping
if bdm['device_name'])
return (block_device_mapping +
[bdm for bdm in image_mappings
if bdm['device_name'] not in device_names])
def _check_and_transform_bdm(self, context, base_options, instance_type,
image_meta, min_count, max_count,
block_device_mapping, legacy_bdm):
# NOTE (ndipanov): Assume root dev name is 'vda' if not supplied.
# It's needed for legacy conversion to work.
root_device_name = (base_options.get('root_device_name') or 'vda')
image_ref = base_options.get('image_ref', '')
# If the instance is booted by image and has a volume attached,
# the volume cannot have the same device name as root_device_name
if image_ref:
for bdm in block_device_mapping:
if (bdm.get('source_type') == 'volume' and
block_device.strip_dev(bdm.get(
'device_name')) == root_device_name):
msg = _('The volume cannot be assigned the same device'
' name as the root device %s') % root_device_name
raise exception.InvalidRequest(msg)
image_defined_bdms = self._get_image_defined_bdms(
base_options, instance_type, image_meta, root_device_name)
root_in_image_bdms = (
block_device.get_root_bdm(image_defined_bdms) is not None)
if legacy_bdm:
block_device_mapping = block_device.from_legacy_mapping(
block_device_mapping, image_ref, root_device_name,
no_root=root_in_image_bdms)
elif root_in_image_bdms:
# NOTE (ndipanov): client will insert an image mapping into the v2
# block_device_mapping, but if there is a bootable device in image
# mappings - we need to get rid of the inserted image
# NOTE (gibi): another case is when a server is booted with an
# image to bdm mapping where the image only contains a bdm to a
# snapshot. In this case the other image to bdm mapping
# contains an unnecessary device with boot_index == 0.
# Also in this case the image_ref is None as we are booting from
# an image to volume bdm.
def not_image_and_root_bdm(bdm):
return not (bdm.get('boot_index') == 0 and
bdm.get('source_type') == 'image')
block_device_mapping = (
filter(not_image_and_root_bdm, block_device_mapping))
block_device_mapping = self._merge_with_image_bdms(
block_device_mapping, image_defined_bdms)
if min_count > 1 or max_count > 1:
if any(map(lambda bdm: bdm['source_type'] == 'volume',
block_device_mapping)):
msg = _('Cannot attach one or more volumes to multiple'
' instances')
raise exception.InvalidRequest(msg)
block_device_mapping += self._get_flavor_defined_bdms(
instance_type, block_device_mapping)
return block_device_obj.block_device_make_list_from_dicts(
context, block_device_mapping)
def _get_image(self, context, image_href):
if not image_href:
return None, {}
image = self.image_api.get(context, image_href)
return image['id'], image
def _checks_for_create_and_rebuild(self, context, image_id, image,
instance_type, metadata,
files_to_inject, root_bdm):
self._check_metadata_properties_quota(context, metadata)
self._check_injected_file_quota(context, files_to_inject)
self._check_requested_image(context, image_id, image,
instance_type, root_bdm)
def _validate_and_build_base_options(self, context, instance_type,
boot_meta, image_href, image_id,
kernel_id, ramdisk_id, display_name,
display_description, key_name,
key_data, security_groups,
availability_zone, forced_host,
user_data, metadata,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
auto_disk_config, reservation_id,
max_count):
"""Verify all the input parameters regardless of the provisioning
strategy being performed.
"""
if availability_zone:
available_zones = availability_zones.\
get_availability_zones(context.elevated(), True)
if forced_host is None and availability_zone not in \
available_zones:
msg = _('The requested availability zone is not available')
raise exception.InvalidRequest(msg)
if instance_type['disabled']:
raise exception.FlavorNotFound(flavor_id=instance_type['id'])
if user_data:
l = len(user_data)
if l > MAX_USERDATA_SIZE:
# NOTE(mikal): user_data is stored in a text column, and
# the database might silently truncate if its over length.
raise exception.InstanceUserDataTooLarge(
length=l, maxsize=MAX_USERDATA_SIZE)
try:
base64.decodestring(user_data)
except base64.binascii.Error:
raise exception.InstanceUserDataMalformed()
self._check_requested_secgroups(context, security_groups)
# Note: max_count is the number of instances requested by the user,
# max_network_count is the maximum number of instances taking into
# account any network quotas
max_network_count = self._check_requested_networks(context,
requested_networks, max_count)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, kernel_id, ramdisk_id, boot_meta)
config_drive = self._check_config_drive(config_drive)
if key_data is None and key_name is not None:
key_pair = objects.KeyPair.get_by_name(context,
context.user_id,
key_name)
key_data = key_pair.public_key
root_device_name = block_device.prepend_dev(
block_device.properties_root_device_name(
boot_meta.get('properties', {})))
image_meta = objects.ImageMeta.from_dict(boot_meta)
numa_topology = hardware.numa_get_constraints(
instance_type, image_meta)
system_metadata = {}
# PCI requests come from two sources: instance flavor and
# requested_networks. The first call in below returns an
# InstancePCIRequests object which is a list of InstancePCIRequest
# objects. The second call in below creates an InstancePCIRequest
# object for each SR-IOV port, and append it to the list in the
# InstancePCIRequests object
pci_request_info = pci_request.get_pci_requests_from_flavor(
instance_type)
self.network_api.create_pci_requests_for_sriov_ports(context,
pci_request_info, requested_networks)
base_options = {
'reservation_id': reservation_id,
'image_ref': image_href,
'kernel_id': kernel_id or '',
'ramdisk_id': ramdisk_id or '',
'power_state': power_state.NOSTATE,
'vm_state': vm_states.BUILDING,
'config_drive': config_drive,
'user_id': context.user_id,
'project_id': context.project_id,
'instance_type_id': instance_type['id'],
'memory_mb': instance_type['memory_mb'],
'vcpus': instance_type['vcpus'],
'root_gb': instance_type['root_gb'],
'ephemeral_gb': instance_type['ephemeral_gb'],
'display_name': display_name,
'display_description': display_description or '',
'user_data': user_data,
'key_name': key_name,
'key_data': key_data,
'locked': False,
'metadata': metadata or {},
'access_ip_v4': access_ip_v4,
'access_ip_v6': access_ip_v6,
'availability_zone': availability_zone,
'root_device_name': root_device_name,
'progress': 0,
'pci_requests': pci_request_info,
'numa_topology': numa_topology,
'system_metadata': system_metadata}
options_from_image = self._inherit_properties_from_image(
boot_meta, auto_disk_config)
base_options.update(options_from_image)
# return the validated options and maximum number of instances allowed
# by the network quotas
return base_options, max_network_count
def _build_filter_properties(self, context, scheduler_hints, forced_host,
forced_node, instance_type):
filter_properties = dict(scheduler_hints=scheduler_hints)
filter_properties['instance_type'] = instance_type
if forced_host:
filter_properties['force_hosts'] = [forced_host]
if forced_node:
filter_properties['force_nodes'] = [forced_node]
return filter_properties
def _provision_instances(self, context, instance_type, min_count,
max_count, base_options, boot_meta, security_groups,
block_device_mapping, shutdown_terminate,
instance_group, check_server_group_quota):
# Reserve quotas
num_instances, quotas = self._check_num_instances_quota(
context, instance_type, min_count, max_count)
LOG.debug("Going to run %s instances..." % num_instances)
instances = []
try:
for i in range(num_instances):
instance = objects.Instance(context=context)
instance.update(base_options)
instance = self.create_db_entry_for_new_instance(
context, instance_type, boot_meta, instance,
security_groups, block_device_mapping,
num_instances, i, shutdown_terminate)
instances.append(instance)
if instance_group:
if check_server_group_quota:
count = objects.Quotas.count(context,
'server_group_members',
instance_group,
context.user_id)
try:
objects.Quotas.limit_check(context,
server_group_members=count + 1)
except exception.OverQuota:
msg = _("Quota exceeded, too many servers in "
"group")
raise exception.QuotaError(msg)
objects.InstanceGroup.add_members(context,
instance_group.uuid,
[instance.uuid])
# send a state update notification for the initial create to
# show it going from non-existent to BUILDING
notifications.send_update_with_states(context, instance, None,
vm_states.BUILDING, None, None, service="api")
# In the case of any exceptions, attempt DB cleanup and rollback the
# quota reservations.
except Exception:
with excutils.save_and_reraise_exception():
try:
for instance in instances:
try:
instance.destroy()
except exception.ObjectActionError:
pass
finally:
quotas.rollback()
# Commit the reservations
quotas.commit()
return instances
def _get_bdm_image_metadata(self, context, block_device_mapping,
legacy_bdm=True):
"""If we are booting from a volume, we need to get the
volume details from Cinder and make sure we pass the
metadata back accordingly.
"""
if not block_device_mapping:
return {}
for bdm in block_device_mapping:
if (legacy_bdm and
block_device.get_device_letter(
bdm.get('device_name', '')) != 'a'):
continue
elif not legacy_bdm and bdm.get('boot_index') != 0:
continue
volume_id = bdm.get('volume_id')
snapshot_id = bdm.get('snapshot_id')
if snapshot_id:
# NOTE(alaski): A volume snapshot inherits metadata from the
# originating volume, but the API does not expose metadata
# on the snapshot itself. So we query the volume for it below.
snapshot = self.volume_api.get_snapshot(context, snapshot_id)
volume_id = snapshot['volume_id']
if bdm.get('image_id'):
try:
image_id = bdm['image_id']
image_meta = self.image_api.get(context, image_id)
return image_meta
except Exception:
raise exception.InvalidBDMImage(id=image_id)
elif volume_id:
try:
volume = self.volume_api.get(context, volume_id)
except exception.CinderConnectionFailed:
raise
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
if not volume.get('bootable', True):
raise exception.InvalidBDMVolumeNotBootable(id=volume_id)
return utils.get_image_metadata_from_volume(volume)
return {}
@staticmethod
def _get_requested_instance_group(context, scheduler_hints,
check_quota):
if not scheduler_hints:
return
group_hint = scheduler_hints.get('group')
if not group_hint:
return
if not uuidutils.is_uuid_like(group_hint):
msg = _('Server group scheduler hint must be a UUID.')
raise exception.InvalidInput(reason=msg)
return objects.InstanceGroup.get_by_uuid(context, group_hint)
def _create_instance(self, context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_groups,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
reservation_id=None, scheduler_hints=None,
legacy_bdm=True, shutdown_terminate=False,
check_server_group_quota=False):
"""Verify all the input parameters regardless of the provisioning
strategy being performed and schedule the instance(s) for
creation.
"""
# Normalize and setup some parameters
if reservation_id is None:
reservation_id = utils.generate_uid('r')
security_groups = security_groups or ['default']
min_count = min_count or 1
max_count = max_count or min_count
block_device_mapping = block_device_mapping or []
if not instance_type:
instance_type = flavors.get_default_flavor()
if image_href:
image_id, boot_meta = self._get_image(context, image_href)
else:
image_id = None
boot_meta = self._get_bdm_image_metadata(
context, block_device_mapping, legacy_bdm)
self._check_auto_disk_config(image=boot_meta,
auto_disk_config=auto_disk_config)
handle_az = self._handle_availability_zone
availability_zone, forced_host, forced_node = handle_az(context,
availability_zone)
if not self.skip_policy_check and (forced_host or forced_node):
check_policy(context, 'create:forced_host', {})
base_options, max_net_count = self._validate_and_build_base_options(
context,
instance_type, boot_meta, image_href, image_id, kernel_id,
ramdisk_id, display_name, display_description,
key_name, key_data, security_groups, availability_zone,
forced_host, user_data, metadata, access_ip_v4,
access_ip_v6, requested_networks, config_drive,
auto_disk_config, reservation_id, max_count)
# max_net_count is the maximum number of instances requested by the
# user adjusted for any network quota constraints, including
# consideration of connections to each requested network
if max_net_count == 0:
raise exception.PortLimitExceeded()
elif max_net_count < max_count:
LOG.debug("max count reduced from %(max_count)d to "
"%(max_net_count)d due to network port quota",
{'max_count': max_count,
'max_net_count': max_net_count})
max_count = max_net_count
block_device_mapping = self._check_and_transform_bdm(context,
base_options, instance_type, boot_meta, min_count, max_count,
block_device_mapping, legacy_bdm)
# We can't do this check earlier because we need bdms from all sources
# to have been merged in order to get the root bdm.
self._checks_for_create_and_rebuild(context, image_id, boot_meta,
instance_type, metadata, injected_files,
block_device_mapping.root_bdm())
instance_group = self._get_requested_instance_group(context,
scheduler_hints, check_server_group_quota)
instances = self._provision_instances(context, instance_type,
min_count, max_count, base_options, boot_meta, security_groups,
block_device_mapping, shutdown_terminate,
instance_group, check_server_group_quota)
filter_properties = self._build_filter_properties(context,
scheduler_hints, forced_host,
forced_node, instance_type)
for instance in instances:
self._record_action_start(context, instance,
instance_actions.CREATE)
self.compute_task_api.build_instances(context,
instances=instances, image=boot_meta,
filter_properties=filter_properties,
admin_password=admin_password,
injected_files=injected_files,
requested_networks=requested_networks,
security_groups=security_groups,
block_device_mapping=block_device_mapping,
legacy_bdm=False)
return (instances, reservation_id)
@staticmethod
def _volume_size(instance_type, bdm):
size = bdm.get('volume_size')
# NOTE (ndipanov): inherit flavour size only for swap and ephemeral
if (size is None and bdm.get('source_type') == 'blank' and
bdm.get('destination_type') == 'local'):
if bdm.get('guest_format') == 'swap':
size = instance_type.get('swap', 0)
else:
size = instance_type.get('ephemeral_gb', 0)
return size
def _prepare_image_mapping(self, instance_type, mappings):
"""Extract and format blank devices from image mappings."""
prepared_mappings = []
for bdm in block_device.mappings_prepend_dev(mappings):
LOG.debug("Image bdm %s", bdm)
virtual_name = bdm['virtual']
if virtual_name == 'ami' or virtual_name == 'root':
continue
if not block_device.is_swap_or_ephemeral(virtual_name):
continue
guest_format = bdm.get('guest_format')
if virtual_name == 'swap':
guest_format = 'swap'
if not guest_format:
guest_format = CONF.default_ephemeral_format
values = block_device.BlockDeviceDict({
'device_name': bdm['device'],
'source_type': 'blank',
'destination_type': 'local',
'device_type': 'disk',
'guest_format': guest_format,
'delete_on_termination': True,
'boot_index': -1})
values['volume_size'] = self._volume_size(
instance_type, values)
if values['volume_size'] == 0:
continue
prepared_mappings.append(values)
return prepared_mappings
def _create_block_device_mapping(self, instance_type, instance_uuid,
block_device_mapping):
"""Create the BlockDeviceMapping objects in the db.
This method makes a copy of the list in order to avoid using the same
id field in case this is called for multiple instances.
"""
LOG.debug("block_device_mapping %s", block_device_mapping,
instance_uuid=instance_uuid)
instance_block_device_mapping = copy.deepcopy(block_device_mapping)
for bdm in instance_block_device_mapping:
bdm.volume_size = self._volume_size(instance_type, bdm)
if bdm.volume_size == 0:
continue
bdm.instance_uuid = instance_uuid
bdm.update_or_create()
def _validate_bdm(self, context, instance, instance_type, all_mappings):
def _subsequent_list(l):
return all(el + 1 == l[i + 1] for i, el in enumerate(l[:-1]))
# Make sure that the boot indexes make sense
boot_indexes = sorted([bdm.boot_index
for bdm in all_mappings
if bdm.boot_index is not None
and bdm.boot_index >= 0])
if 0 not in boot_indexes or not _subsequent_list(boot_indexes):
raise exception.InvalidBDMBootSequence()
for bdm in all_mappings:
# NOTE(vish): For now, just make sure the volumes are accessible.
# Additionally, check that the volume can be attached to this
# instance.
snapshot_id = bdm.snapshot_id
volume_id = bdm.volume_id
image_id = bdm.image_id
if (image_id is not None and
image_id != instance.get('image_ref')):
try:
self._get_image(context, image_id)
except Exception:
raise exception.InvalidBDMImage(id=image_id)
if (bdm.source_type == 'image' and
bdm.destination_type == 'volume' and
not bdm.volume_size):
raise exception.InvalidBDM(message=_("Images with "
"destination_type 'volume' need to have a non-zero "
"size specified"))
elif volume_id is not None:
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context,
volume,
instance=instance)
bdm.volume_size = volume.get('size')
except (exception.CinderConnectionFailed,
exception.InvalidVolume):
raise
except Exception:
raise exception.InvalidBDMVolume(id=volume_id)
elif snapshot_id is not None:
try:
snap = self.volume_api.get_snapshot(context, snapshot_id)
bdm.volume_size = bdm.volume_size or snap.get('size')
except exception.CinderConnectionFailed:
raise
except Exception:
raise exception.InvalidBDMSnapshot(id=snapshot_id)
elif (bdm.source_type == 'blank' and
bdm.destination_type == 'volume' and
not bdm.volume_size):
raise exception.InvalidBDM(message=_("Blank volumes "
"(source: 'blank', dest: 'volume') need to have non-zero "
"size"))
ephemeral_size = sum(bdm.volume_size or 0
for bdm in all_mappings
if block_device.new_format_is_ephemeral(bdm))
if ephemeral_size > instance_type['ephemeral_gb']:
raise exception.InvalidBDMEphemeralSize()
# There should be only one swap
swap_list = block_device.get_bdm_swap_list(all_mappings)
if len(swap_list) > 1:
msg = _("More than one swap drive requested.")
raise exception.InvalidBDMFormat(details=msg)
if swap_list:
swap_size = swap_list[0].volume_size or 0
if swap_size > instance_type['swap']:
raise exception.InvalidBDMSwapSize()
max_local = CONF.max_local_block_devices
if max_local >= 0:
num_local = len([bdm for bdm in all_mappings
if bdm.destination_type == 'local'])
if num_local > max_local:
raise exception.InvalidBDMLocalsLimit()
def _populate_instance_names(self, instance, num_instances):
"""Populate instance display_name and hostname."""
display_name = instance.get('display_name')
if instance.obj_attr_is_set('hostname'):
hostname = instance.get('hostname')
else:
hostname = None
if display_name is None:
display_name = self._default_display_name(instance.uuid)
instance.display_name = display_name
if hostname is None and num_instances == 1:
# NOTE(russellb) In the multi-instance case, we're going to
# overwrite the display_name using the
# multi_instance_display_name_template. We need the default
# display_name set so that it can be used in the template, though.
# Only set the hostname here if we're only creating one instance.
# Otherwise, it will be built after the template based
# display_name.
hostname = display_name
instance.hostname = utils.sanitize_hostname(hostname)
def _default_display_name(self, instance_uuid):
return "Server %s" % instance_uuid
def _populate_instance_for_create(self, context, instance, image,
index, security_groups, instance_type):
"""Build the beginning of a new instance."""
if not instance.obj_attr_is_set('uuid'):
# Generate the instance_uuid here so we can use it
# for additional setup before creating the DB entry.
instance.uuid = str(uuid.uuid4())
instance.launch_index = index
instance.vm_state = vm_states.BUILDING
instance.task_state = task_states.SCHEDULING
info_cache = objects.InstanceInfoCache()
info_cache.instance_uuid = instance.uuid
info_cache.network_info = network_model.NetworkInfo()
instance.info_cache = info_cache
instance.flavor = instance_type
instance.old_flavor = None
instance.new_flavor = None
if CONF.ephemeral_storage_encryption.enabled:
instance.ephemeral_key_uuid = self.key_manager.create_key(
context,
length=CONF.ephemeral_storage_encryption.key_size)
else:
instance.ephemeral_key_uuid = None
# Store image properties so we can use them later
# (for notifications, etc). Only store what we can.
if not instance.obj_attr_is_set('system_metadata'):
instance.system_metadata = {}
# Make sure we have the dict form that we need for instance_update.
instance.system_metadata = utils.instance_sys_meta(instance)
system_meta = utils.get_system_metadata_from_image(
image, instance_type)
# In case we couldn't find any suitable base_image
system_meta.setdefault('image_base_image_ref', instance.image_ref)
instance.system_metadata.update(system_meta)
self.security_group_api.populate_security_groups(instance,
security_groups)
return instance
# NOTE(bcwaldon): No policy check since this is only used by scheduler and
# the compute api. That should probably be cleaned up, though.
def create_db_entry_for_new_instance(self, context, instance_type, image,
instance, security_group, block_device_mapping, num_instances,
index, shutdown_terminate=False):
"""Create an entry in the DB for this new instance,
including any related table updates (such as security group,
etc).
This is called by the scheduler after a location for the
instance has been determined.
"""
self._populate_instance_for_create(context, instance, image, index,
security_group, instance_type)
self._populate_instance_names(instance, num_instances)
instance.shutdown_terminate = shutdown_terminate
self.security_group_api.ensure_default(context)
instance.create()
if num_instances > 1:
# NOTE(russellb) We wait until this spot to handle
# multi_instance_display_name_template, because we need
# the UUID from the instance.
instance = self._apply_instance_name_template(context, instance,
index)
# NOTE (ndipanov): This can now raise exceptions but the instance
# has been created, so delete it and re-raise so
# that other cleanup can happen.
try:
self._validate_bdm(
context, instance, instance_type, block_device_mapping)
except (exception.CinderConnectionFailed, exception.InvalidBDM,
exception.InvalidVolume):
with excutils.save_and_reraise_exception():
instance.destroy()
self._create_block_device_mapping(
instance_type, instance.uuid, block_device_mapping)
return instance
def _check_create_policies(self, context, availability_zone,
requested_networks, block_device_mapping):
"""Check policies for create()."""
target = {'project_id': context.project_id,
'user_id': context.user_id,
'availability_zone': availability_zone}
if not self.skip_policy_check:
check_policy(context, 'create', target)
if requested_networks and len(requested_networks):
check_policy(context, 'create:attach_network', target)
if block_device_mapping:
check_policy(context, 'create:attach_volume', target)
def _check_multiple_instances_neutron_ports(self, requested_networks):
"""Check whether multiple instances are created from port id(s)."""
for requested_net in requested_networks:
if requested_net.port_id:
msg = _("Unable to launch multiple instances with"
" a single configured port ID. Please launch your"
" instance one by one with different ports.")
raise exception.MultiplePortsNotApplicable(reason=msg)
def _check_multiple_instances_and_specified_ip(self, requested_networks):
"""Check whether multiple instances are created with specified ip."""
for requested_net in requested_networks:
if requested_net.network_id and requested_net.address:
msg = _("max_count cannot be greater than 1 if an fixed_ip "
"is specified.")
raise exception.InvalidFixedIpAndMaxCountRequest(reason=msg)
@hooks.add_hook("create_instance")
def create(self, context, instance_type,
image_href, kernel_id=None, ramdisk_id=None,
min_count=None, max_count=None,
display_name=None, display_description=None,
key_name=None, key_data=None, security_group=None,
availability_zone=None, user_data=None, metadata=None,
injected_files=None, admin_password=None,
block_device_mapping=None, access_ip_v4=None,
access_ip_v6=None, requested_networks=None, config_drive=None,
auto_disk_config=None, scheduler_hints=None, legacy_bdm=True,
shutdown_terminate=False, check_server_group_quota=False):
"""Provision instances, sending instance information to the
scheduler. The scheduler will determine where the instance(s)
go and will handle creating the DB entries.
Returns a tuple of (instances, reservation_id)
"""
self._check_create_policies(context, availability_zone,
requested_networks, block_device_mapping)
if requested_networks and max_count > 1:
self._check_multiple_instances_and_specified_ip(requested_networks)
if utils.is_neutron():
self._check_multiple_instances_neutron_ports(
requested_networks)
return self._create_instance(
context, instance_type,
image_href, kernel_id, ramdisk_id,
min_count, max_count,
display_name, display_description,
key_name, key_data, security_group,
availability_zone, user_data, metadata,
injected_files, admin_password,
access_ip_v4, access_ip_v6,
requested_networks, config_drive,
block_device_mapping, auto_disk_config,
scheduler_hints=scheduler_hints,
legacy_bdm=legacy_bdm,
shutdown_terminate=shutdown_terminate,
check_server_group_quota=check_server_group_quota)
def _check_auto_disk_config(self, instance=None, image=None,
**extra_instance_updates):
auto_disk_config = extra_instance_updates.get("auto_disk_config")
if auto_disk_config is None:
return
if not image and not instance:
return
if image:
image_props = image.get("properties", {})
auto_disk_config_img = \
utils.get_auto_disk_config_from_image_props(image_props)
image_ref = image.get("id")
else:
sys_meta = utils.instance_sys_meta(instance)
image_ref = sys_meta.get('image_base_image_ref')
auto_disk_config_img = \
utils.get_auto_disk_config_from_instance(sys_meta=sys_meta)
self._ensure_auto_disk_config_is_valid(auto_disk_config_img,
auto_disk_config,
image_ref)
def _delete(self, context, instance, delete_type, cb, **instance_attrs):
if instance.disable_terminate:
LOG.info(_LI('instance termination disabled'),
instance=instance)
return
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
project_id, user_id = quotas_obj.ids_from_instance(context, instance)
# At these states an instance has a snapshot associate.
if instance.vm_state in (vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED):
snapshot_id = instance.system_metadata.get('shelved_image_id')
LOG.info(_LI("Working on deleting snapshot %s "
"from shelved instance..."),
snapshot_id, instance=instance)
try:
self.image_api.delete(context, snapshot_id)
except (exception.ImageNotFound,
exception.ImageNotAuthorized) as exc:
LOG.warning(_LW("Failed to delete snapshot "
"from shelved instance (%s)."),
exc.format_message(), instance=instance)
except Exception:
LOG.exception(_LE("Something wrong happened when trying to "
"delete snapshot from shelved instance."),
instance=instance)
original_task_state = instance.task_state
quotas = None
try:
# NOTE(maoy): no expected_task_state needs to be set
instance.update(instance_attrs)
instance.progress = 0
instance.save()
# NOTE(comstud): If we delete the instance locally, we'll
# commit the reservations here. Otherwise, the manager side
# will commit or rollback the reservations based on success.
quotas = self._create_reservations(context,
instance,
original_task_state,
project_id, user_id)
if self.cell_type == 'api':
# NOTE(comstud): If we're in the API cell, we need to
# skip all remaining logic and just call the callback,
# which will cause a cast to the child cell. Also,
# commit reservations here early until we have a better
# way to deal with quotas with cells.
cb(context, instance, bdms, reservations=None)
quotas.commit()
return
shelved_offloaded = (instance.vm_state
== vm_states.SHELVED_OFFLOADED)
if not instance.host and not shelved_offloaded:
try:
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.start" % delete_type)
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance,
"%s.end" % delete_type,
system_metadata=instance.system_metadata)
quotas.commit()
return
except exception.ObjectActionError:
instance.refresh()
if instance.vm_state == vm_states.RESIZED:
self._confirm_resize_on_deleting(context, instance)
is_local_delete = True
try:
if not shelved_offloaded:
service = objects.Service.get_by_compute_host(
context.elevated(), instance.host)
is_local_delete = not self.servicegroup_api.service_is_up(
service)
if not is_local_delete:
if original_task_state in (task_states.DELETING,
task_states.SOFT_DELETING):
LOG.info(_LI('Instance is already in deleting state, '
'ignoring this request'),
instance=instance)
quotas.rollback()
return
self._record_action_start(context, instance,
instance_actions.DELETE)
# NOTE(snikitin): If instance's vm_state is 'soft-delete',
# we should not count reservations here, because instance
# in soft-delete vm_state have already had quotas
# decremented. More details:
# https://bugs.launchpad.net/nova/+bug/1333145
if instance.vm_state == vm_states.SOFT_DELETED:
quotas.rollback()
cb(context, instance, bdms,
reservations=quotas.reservations)
except exception.ComputeHostNotFound:
pass
if is_local_delete:
# If instance is in shelved_offloaded state or compute node
# isn't up, delete instance from db and clean bdms info and
# network info
self._local_delete(context, instance, bdms, delete_type, cb)
quotas.commit()
except exception.InstanceNotFound:
# NOTE(comstud): Race condition. Instance already gone.
if quotas:
quotas.rollback()
except Exception:
with excutils.save_and_reraise_exception():
if quotas:
quotas.rollback()
def _confirm_resize_on_deleting(self, context, instance):
# If in the middle of a resize, use confirm_resize to
# ensure the original instance is cleaned up too
migration = None
for status in ('finished', 'confirming'):
try:
migration = objects.Migration.get_by_instance_and_status(
context.elevated(), instance.uuid, status)
LOG.info(_LI('Found an unconfirmed migration during delete, '
'id: %(id)s, status: %(status)s'),
{'id': migration.id,
'status': migration.status},
context=context, instance=instance)
break
except exception.MigrationNotFoundByStatus:
pass
if not migration:
LOG.info(_LI('Instance may have been confirmed during delete'),
context=context, instance=instance)
return
src_host = migration.source_compute
# Call since this can race with the terminate_instance.
# The resize is done but awaiting confirmation/reversion,
# so there are two cases:
# 1. up-resize: here -instance['vcpus'/'memory_mb'] match
# the quota usages accounted for this instance,
# so no further quota adjustment is needed
# 2. down-resize: here -instance['vcpus'/'memory_mb'] are
# shy by delta(old, new) from the quota usages accounted
# for this instance, so we must adjust
try:
deltas = compute_utils.downsize_quota_delta(context, instance)
except KeyError:
LOG.info(_LI('Migration %s may have been confirmed during '
'delete'),
migration.id, context=context, instance=instance)
return
quotas = compute_utils.reserve_quota_delta(context, deltas, instance)
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance, migration,
src_host, quotas.reservations,
cast=False)
def _create_reservations(self, context, instance, original_task_state,
project_id, user_id):
instance_vcpus = instance.vcpus
instance_memory_mb = instance.memory_mb
# NOTE(wangpan): if the instance is resizing, and the resources
# are updated to new instance type, we should use
# the old instance type to create reservation.
# see https://bugs.launchpad.net/nova/+bug/1099729 for more details
if original_task_state in (task_states.RESIZE_MIGRATED,
task_states.RESIZE_FINISH):
try:
migration = objects.Migration.get_by_instance_and_status(
context.elevated(), instance.uuid, 'post-migrating')
except exception.MigrationNotFoundByStatus:
migration = None
if (migration and
instance.instance_type_id ==
migration.new_instance_type_id):
get_inst_attrs = compute_utils.get_inst_attrs_from_migration
instance_vcpus, instance_memory_mb = get_inst_attrs(migration,
instance)
quotas = objects.Quotas(context=context)
quotas.reserve(project_id=project_id,
user_id=user_id,
instances=-1,
cores=-instance_vcpus,
ram=-instance_memory_mb)
return quotas
def _local_delete(self, context, instance, bdms, delete_type, cb):
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
LOG.info(_LI("instance is in SHELVED_OFFLOADED state, cleanup"
" the instance's info from database."),
instance=instance)
else:
LOG.warning(_LW("instance's host %s is down, deleting from "
"database"), instance.host, instance=instance)
if instance.info_cache is not None:
instance.info_cache.delete()
else:
# NOTE(yoshimatsu): Avoid AttributeError if instance.info_cache
# is None. When the root cause that instance.info_cache becomes
# None is fixed, the log level should be reconsidered.
LOG.warning(_LW("Info cache for instance could not be found. "
"Ignore."), instance=instance)
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.start" % delete_type)
elevated = context.elevated()
if self.cell_type != 'api':
# NOTE(liusheng): In nova-network multi_host scenario,deleting
# network info of the instance may need instance['host'] as
# destination host of RPC call. If instance in SHELVED_OFFLOADED
# state, instance['host'] is None, here, use shelved_host as host
# to deallocate network info and reset instance['host'] after that.
# Here we shouldn't use instance.save(), because this will mislead
# user who may think the instance's host has been changed, and
# actually, the instance.host is always None.
orig_host = instance.host
try:
if instance.vm_state == vm_states.SHELVED_OFFLOADED:
sysmeta = getattr(instance,
obj_base.get_attrname('system_metadata'))
instance.host = sysmeta.get('shelved_host')
self.network_api.deallocate_for_instance(elevated,
instance)
finally:
instance.host = orig_host
# cleanup volumes
for bdm in bdms:
if bdm.is_volume:
# NOTE(vish): We don't have access to correct volume
# connector info, so just pass a fake
# connector. This can be improved when we
# expose get_volume_connector to rpc.
connector = {'ip': '127.0.0.1', 'initiator': 'iqn.fake'}
try:
self.volume_api.terminate_connection(context,
bdm.volume_id,
connector)
self.volume_api.detach(elevated, bdm.volume_id)
if bdm.delete_on_termination:
self.volume_api.delete(context, bdm.volume_id)
except Exception as exc:
err_str = _LW("Ignoring volume cleanup failure due to %s")
LOG.warn(err_str % exc, instance=instance)
bdm.destroy()
cb(context, instance, bdms, local=True)
sys_meta = instance.system_metadata
instance.destroy()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "%s.end" % delete_type,
system_metadata=sys_meta)
def _do_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations,
delete_type='delete')
def _do_force_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.terminate_instance(context, instance, bdms,
reservations=reservations,
delete_type='force_delete')
def _do_soft_delete(self, context, instance, bdms, reservations=None,
local=False):
if local:
instance.vm_state = vm_states.SOFT_DELETED
instance.task_state = None
instance.terminated_at = timeutils.utcnow()
instance.save()
else:
self.compute_rpcapi.soft_delete_instance(context, instance,
reservations=reservations)
# NOTE(maoy): we allow delete to be called no matter what vm_state says.
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=True)
def soft_delete(self, context, instance):
"""Terminate an instance."""
LOG.debug('Going to try to soft delete instance',
instance=instance)
self._delete(context, instance, 'soft_delete', self._do_soft_delete,
task_state=task_states.SOFT_DELETING,
deleted_at=timeutils.utcnow())
def _delete_instance(self, context, instance):
self._delete(context, instance, 'delete', self._do_delete,
task_state=task_states.DELETING)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=None, task_state=None,
must_have_launched=False)
def delete(self, context, instance):
"""Terminate an instance."""
LOG.debug("Going to try to terminate instance", instance=instance)
self._delete_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SOFT_DELETED])
def restore(self, context, instance):
"""Restore a previously deleted (but not reclaimed) instance."""
# Reserve quotas
flavor = instance.get_flavor()
project_id, user_id = quotas_obj.ids_from_instance(context, instance)
num_instances, quotas = self._check_num_instances_quota(
context, flavor, 1, 1,
project_id=project_id, user_id=user_id)
self._record_action_start(context, instance, instance_actions.RESTORE)
try:
if instance.host:
instance.task_state = task_states.RESTORING
instance.deleted_at = None
instance.save(expected_task_state=[None])
self.compute_rpcapi.restore_instance(context, instance)
else:
instance.vm_state = vm_states.ACTIVE
instance.task_state = None
instance.deleted_at = None
instance.save(expected_task_state=[None])
quotas.commit()
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
@wrap_check_policy
@check_instance_lock
@check_instance_state(must_have_launched=False)
def force_delete(self, context, instance):
"""Force delete an instance in any vm_state/task_state."""
self._delete(context, instance, 'force_delete', self._do_force_delete,
task_state=task_states.DELETING)
def force_stop(self, context, instance, do_cast=True, clean_shutdown=True):
LOG.debug("Going to try to stop instance", instance=instance)
instance.task_state = task_states.POWERING_OFF
instance.progress = 0
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.STOP)
self.compute_rpcapi.stop_instance(context, instance, do_cast=do_cast,
clean_shutdown=clean_shutdown)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.ERROR])
def stop(self, context, instance, do_cast=True, clean_shutdown=True):
"""Stop an instance."""
self.force_stop(context, instance, do_cast, clean_shutdown)
@check_instance_lock
@check_instance_host
@check_instance_cell
@check_instance_state(vm_state=[vm_states.STOPPED])
def start(self, context, instance):
"""Start an instance."""
LOG.debug("Going to try to start instance", instance=instance)
instance.task_state = task_states.POWERING_ON
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.START)
# TODO(yamahata): injected_files isn't supported right now.
# It is used only for osapi. not for ec2 api.
# availability_zone isn't used by run_instance.
self.compute_rpcapi.start_instance(context, instance)
def get(self, context, instance_id, want_objects=False,
expected_attrs=None):
"""Get a single instance with the given instance_id."""
if not expected_attrs:
expected_attrs = []
expected_attrs.extend(['metadata', 'system_metadata',
'security_groups', 'info_cache'])
# NOTE(ameade): we still need to support integer ids for ec2
try:
if uuidutils.is_uuid_like(instance_id):
LOG.debug("Fetching instance by UUID",
instance_uuid=instance_id)
instance = objects.Instance.get_by_uuid(
context, instance_id, expected_attrs=expected_attrs)
elif strutils.is_int_like(instance_id):
LOG.debug("Fetching instance by numeric id %s", instance_id)
instance = objects.Instance.get_by_id(
context, instance_id, expected_attrs=expected_attrs)
else:
LOG.debug("Failed to fetch instance by id %s", instance_id)
raise exception.InstanceNotFound(instance_id=instance_id)
except exception.InvalidID:
LOG.debug("Invalid instance id %s", instance_id)
raise exception.InstanceNotFound(instance_id=instance_id)
if not self.skip_policy_check:
check_policy(context, 'get', instance)
if not want_objects:
instance = obj_base.obj_to_primitive(instance)
return instance
def get_all(self, context, search_opts=None, limit=None, marker=None,
want_objects=False, expected_attrs=None, sort_keys=None,
sort_dirs=None):
"""Get all instances filtered by one of the given parameters.
If there is no filter and the context is an admin, it will retrieve
all instances in the system.
Deleted instances will be returned by default, unless there is a
search option that says otherwise.
The results will be sorted based on the list of sort keys in the
'sort_keys' parameter (first value is primary sort key, second value is
secondary sort ket, etc.). For each sort key, the associated sort
direction is based on the list of sort directions in the 'sort_dirs'
parameter.
"""
# TODO(bcwaldon): determine the best argument for target here
target = {
'project_id': context.project_id,
'user_id': context.user_id,
}
if not self.skip_policy_check:
check_policy(context, "get_all", target)
if search_opts is None:
search_opts = {}
LOG.debug("Searching by: %s" % str(search_opts))
# Fixups for the DB call
filters = {}
def _remap_flavor_filter(flavor_id):
flavor = objects.Flavor.get_by_flavor_id(context, flavor_id)
filters['instance_type_id'] = flavor.id
def _remap_fixed_ip_filter(fixed_ip):
# Turn fixed_ip into a regexp match. Since '.' matches
# any character, we need to use regexp escaping for it.
filters['ip'] = '^%s$' % fixed_ip.replace('.', '\\.')
def _remap_metadata_filter(metadata):
filters['metadata'] = jsonutils.loads(metadata)
def _remap_system_metadata_filter(metadata):
filters['system_metadata'] = jsonutils.loads(metadata)
# search_option to filter_name mapping.
filter_mapping = {
'image': 'image_ref',
'name': 'display_name',
'tenant_id': 'project_id',
'flavor': _remap_flavor_filter,
'fixed_ip': _remap_fixed_ip_filter,
'metadata': _remap_metadata_filter,
'system_metadata': _remap_system_metadata_filter}
# copy from search_opts, doing various remappings as necessary
for opt, value in six.iteritems(search_opts):
# Do remappings.
# Values not in the filter_mapping table are copied as-is.
# If remapping is None, option is not copied
# If the remapping is a string, it is the filter_name to use
try:
remap_object = filter_mapping[opt]
except KeyError:
filters[opt] = value
else:
# Remaps are strings to translate to, or functions to call
# to do the translating as defined by the table above.
if isinstance(remap_object, six.string_types):
filters[remap_object] = value
else:
try:
remap_object(value)
# We already know we can't match the filter, so
# return an empty list
except ValueError:
return []
# IP address filtering cannot be applied at the DB layer, remove any DB
# limit so that it can be applied after the IP filter.
filter_ip = 'ip6' in filters or 'ip' in filters
orig_limit = limit
if filter_ip and limit:
LOG.debug('Removing limit for DB query due to IP filter')
limit = None
inst_models = self._get_instances_by_filters(context, filters,
limit=limit, marker=marker, expected_attrs=expected_attrs,
sort_keys=sort_keys, sort_dirs=sort_dirs)
if filter_ip:
inst_models = self._ip_filter(inst_models, filters, orig_limit)
if want_objects:
return inst_models
# Convert the models to dictionaries
instances = []
for inst_model in inst_models:
instances.append(obj_base.obj_to_primitive(inst_model))
return instances
@staticmethod
def _ip_filter(inst_models, filters, limit):
ipv4_f = re.compile(str(filters.get('ip')))
ipv6_f = re.compile(str(filters.get('ip6')))
def _match_instance(instance):
nw_info = compute_utils.get_nw_info_for_instance(instance)
for vif in nw_info:
for fixed_ip in vif.fixed_ips():
address = fixed_ip.get('address')
if not address:
continue
version = fixed_ip.get('version')
if ((version == 4 and ipv4_f.match(address)) or
(version == 6 and ipv6_f.match(address))):
return True
return False
result_objs = []
for instance in inst_models:
if _match_instance(instance):
result_objs.append(instance)
if limit and len(result_objs) == limit:
break
return objects.InstanceList(objects=result_objs)
def _get_instances_by_filters(self, context, filters,
limit=None, marker=None, expected_attrs=None,
sort_keys=None, sort_dirs=None):
fields = ['metadata', 'system_metadata', 'info_cache',
'security_groups']
if expected_attrs:
fields.extend(expected_attrs)
return objects.InstanceList.get_by_filters(
context, filters=filters, limit=limit, marker=marker,
expected_attrs=fields, sort_keys=sort_keys, sort_dirs=sort_dirs)
# NOTE(melwitt): We don't check instance lock for backup because lock is
# intended to prevent accidental change/delete of instances
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def backup(self, context, instance, name, backup_type, rotation,
extra_properties=None):
"""Backup the given instance
:param instance: nova.objects.instance.Instance object
:param name: name of the backup
:param backup_type: 'daily' or 'weekly'
:param rotation: int representing how many backups to keep around;
None if rotation shouldn't be used (as in the case of snapshots)
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
props_copy = dict(extra_properties, backup_type=backup_type)
if self.is_volume_backed_instance(context, instance):
LOG.info(_LI("It's not supported to backup volume backed "
"instance."), context=context, instance=instance)
raise exception.InvalidRequest()
else:
image_meta = self._create_image(context, instance,
name, 'backup',
extra_properties=props_copy)
# NOTE(comstud): Any changes to this method should also be made
# to the backup_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_BACKUP
instance.save(expected_task_state=[None])
self.compute_rpcapi.backup_instance(context, instance,
image_meta['id'],
backup_type,
rotation)
return image_meta
# NOTE(melwitt): We don't check instance lock for snapshot because lock is
# intended to prevent accidental change/delete of instances
@wrap_check_policy
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def snapshot(self, context, instance, name, extra_properties=None):
"""Snapshot the given instance.
:param instance: nova.objects.instance.Instance object
:param name: name of the snapshot
:param extra_properties: dict of extra image properties to include
when creating the image.
:returns: A dict containing image metadata
"""
image_meta = self._create_image(context, instance, name,
'snapshot',
extra_properties=extra_properties)
# NOTE(comstud): Any changes to this method should also be made
# to the snapshot_instance() method in nova/cells/messaging.py
instance.task_state = task_states.IMAGE_SNAPSHOT_PENDING
instance.save(expected_task_state=[None])
self.compute_rpcapi.snapshot_instance(context, instance,
image_meta['id'])
return image_meta
def _create_image(self, context, instance, name, image_type,
extra_properties=None):
"""Create new image entry in the image service. This new image
will be reserved for the compute manager to upload a snapshot
or backup.
:param context: security context
:param instance: nova.objects.instance.Instance object
:param name: string for name of the snapshot
:param image_type: snapshot | backup
:param extra_properties: dict of extra image properties to include
"""
properties = {
'instance_uuid': instance.uuid,
'user_id': str(context.user_id),
'image_type': image_type,
}
properties.update(extra_properties or {})
image_meta = self._initialize_instance_snapshot_metadata(
instance, name, properties)
return self.image_api.create(context, image_meta)
def _initialize_instance_snapshot_metadata(self, instance, name,
extra_properties=None):
"""Initialize new metadata for a snapshot of the given instance.
:param instance: nova.objects.instance.Instance object
:param name: string for name of the snapshot
:param extra_properties: dict of extra metadata properties to include
:returns: the new instance snapshot metadata
"""
image_meta = utils.get_image_from_system_metadata(
instance.system_metadata)
image_meta.update({'name': name,
'is_public': False})
# Delete properties that are non-inheritable
properties = image_meta['properties']
for key in CONF.non_inheritable_image_properties:
properties.pop(key, None)
# The properties in extra_properties have precedence
properties.update(extra_properties or {})
return image_meta
# NOTE(melwitt): We don't check instance lock for snapshot because lock is
# intended to prevent accidental change/delete of instances
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def snapshot_volume_backed(self, context, instance, name,
extra_properties=None):
"""Snapshot the given volume-backed instance.
:param instance: nova.objects.instance.Instance object
:param name: name of the backup or snapshot
:param extra_properties: dict of extra image properties to include
:returns: the new image metadata
"""
image_meta = self._initialize_instance_snapshot_metadata(
instance, name, extra_properties)
# the new image is simply a bucket of properties (particularly the
# block device mapping, kernel and ramdisk IDs) with no image data,
# hence the zero size
image_meta['size'] = 0
for attr in ('container_format', 'disk_format'):
image_meta.pop(attr, None)
properties = image_meta['properties']
# clean properties before filling
for key in ('block_device_mapping', 'bdm_v2', 'root_device_name'):
properties.pop(key, None)
if instance.root_device_name:
properties['root_device_name'] = instance.root_device_name
quiesced = False
if instance.vm_state == vm_states.ACTIVE:
try:
self.compute_rpcapi.quiesce_instance(context, instance)
quiesced = True
except (exception.InstanceQuiesceNotSupported,
exception.QemuGuestAgentNotEnabled,
exception.NovaException, NotImplementedError) as err:
if strutils.bool_from_string(properties.get(
'os_require_quiesce')):
raise
else:
LOG.info(_LI('Skipping quiescing instance: '
'%(reason)s.'), {'reason': err},
context=context, instance=instance)
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
mapping = []
for bdm in bdms:
if bdm.no_device:
continue
if bdm.is_volume:
# create snapshot based on volume_id
volume = self.volume_api.get(context, bdm.volume_id)
# NOTE(yamahata): Should we wait for snapshot creation?
# Linux LVM snapshot creation completes in
# short time, it doesn't matter for now.
name = _('snapshot for %s') % image_meta['name']
snapshot = self.volume_api.create_snapshot_force(
context, volume['id'], name, volume['display_description'])
mapping_dict = block_device.snapshot_from_bdm(snapshot['id'],
bdm)
mapping_dict = mapping_dict.get_image_mapping()
else:
mapping_dict = bdm.get_image_mapping()
mapping.append(mapping_dict)
if quiesced:
self.compute_rpcapi.unquiesce_instance(context, instance, mapping)
if mapping:
properties['block_device_mapping'] = mapping
properties['bdm_v2'] = True
return self.image_api.create(context, image_meta)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=set(
vm_states.ALLOW_SOFT_REBOOT + vm_states.ALLOW_HARD_REBOOT),
task_state=[None, task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.PAUSING,
task_states.SUSPENDING])
def reboot(self, context, instance, reboot_type):
"""Reboot the given instance."""
if (reboot_type == 'SOFT' and
(instance.vm_state not in vm_states.ALLOW_SOFT_REBOOT)):
raise exception.InstanceInvalidState(
attr='vm_state',
instance_uuid=instance.uuid,
state=instance.vm_state,
method='soft reboot')
if reboot_type == 'SOFT' and instance.task_state is not None:
raise exception.InstanceInvalidState(
attr='task_state',
instance_uuid=instance.uuid,
state=instance.task_state,
method='reboot')
expected_task_state = [None]
if reboot_type == 'HARD':
expected_task_state.extend([task_states.REBOOTING,
task_states.REBOOT_PENDING,
task_states.REBOOT_STARTED,
task_states.REBOOTING_HARD,
task_states.RESUMING,
task_states.UNPAUSING,
task_states.SUSPENDING])
state = {'SOFT': task_states.REBOOTING,
'HARD': task_states.REBOOTING_HARD}[reboot_type]
instance.task_state = state
instance.save(expected_task_state=expected_task_state)
self._record_action_start(context, instance, instance_actions.REBOOT)
self.compute_rpcapi.reboot_instance(context, instance=instance,
block_device_info=None,
reboot_type=reboot_type)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rebuild(self, context, instance, image_href, admin_password,
files_to_inject=None, **kwargs):
"""Rebuild the given instance with the provided attributes."""
orig_image_ref = instance.image_ref or ''
files_to_inject = files_to_inject or []
metadata = kwargs.get('metadata', {})
preserve_ephemeral = kwargs.get('preserve_ephemeral', False)
auto_disk_config = kwargs.get('auto_disk_config')
image_id, image = self._get_image(context, image_href)
self._check_auto_disk_config(image=image, **kwargs)
flavor = instance.get_flavor()
root_bdm = self._get_root_bdm(context, instance)
self._checks_for_create_and_rebuild(context, image_id, image,
flavor, metadata, files_to_inject, root_bdm)
kernel_id, ramdisk_id = self._handle_kernel_and_ramdisk(
context, None, None, image)
def _reset_image_metadata():
"""Remove old image properties that we're storing as instance
system metadata. These properties start with 'image_'.
Then add the properties for the new image.
"""
# FIXME(comstud): There's a race condition here in that if
# the system_metadata for this instance is updated after
# we do the previous save() and before we update.. those
# other updates will be lost. Since this problem exists in
# a lot of other places, I think it should be addressed in
# a DB layer overhaul.
orig_sys_metadata = dict(instance.system_metadata)
# Remove the old keys
for key in instance.system_metadata.keys():
if key.startswith(utils.SM_IMAGE_PROP_PREFIX):
del instance.system_metadata[key]
# Add the new ones
new_sys_metadata = utils.get_system_metadata_from_image(
image, flavor)
instance.system_metadata.update(new_sys_metadata)
instance.save()
return orig_sys_metadata
# Since image might have changed, we may have new values for
# os_type, vm_mode, etc
options_from_image = self._inherit_properties_from_image(
image, auto_disk_config)
instance.update(options_from_image)
instance.task_state = task_states.REBUILDING
instance.image_ref = image_href
instance.kernel_id = kernel_id or ""
instance.ramdisk_id = ramdisk_id or ""
instance.progress = 0
instance.update(kwargs)
instance.save(expected_task_state=[None])
# On a rebuild, since we're potentially changing images, we need to
# wipe out the old image properties that we're storing as instance
# system metadata... and copy in the properties for the new image.
orig_sys_metadata = _reset_image_metadata()
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
self._record_action_start(context, instance, instance_actions.REBUILD)
self.compute_task_api.rebuild_instance(context, instance=instance,
new_pass=admin_password, injected_files=files_to_inject,
image_ref=image_href, orig_image_ref=orig_image_ref,
orig_sys_metadata=orig_sys_metadata, bdms=bdms,
preserve_ephemeral=preserve_ephemeral, host=instance.host,
kwargs=kwargs)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def revert_resize(self, context, instance):
"""Reverts a resize, deleting the 'new' instance in the process."""
elevated = context.elevated()
migration = objects.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reverse quota reservation for increased resource usage
deltas = compute_utils.reverse_upsize_quota_delta(context, migration)
quotas = compute_utils.reserve_quota_delta(context, deltas, instance)
instance.task_state = task_states.RESIZE_REVERTING
try:
instance.save(expected_task_state=[None])
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
migration.status = 'reverting'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable:
quotas.commit()
self._record_action_start(context, instance,
instance_actions.REVERT_RESIZE)
self.compute_rpcapi.revert_resize(context, instance,
migration,
migration.dest_compute,
quotas.reservations or [])
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.RESIZED])
def confirm_resize(self, context, instance, migration=None):
"""Confirms a migration/resize and deletes the 'old' instance."""
elevated = context.elevated()
if migration is None:
migration = objects.Migration.get_by_instance_and_status(
elevated, instance.uuid, 'finished')
# reserve quota only for any decrease in resource usage
deltas = compute_utils.downsize_quota_delta(context, instance)
quotas = compute_utils.reserve_quota_delta(context, deltas, instance)
migration.status = 'confirming'
migration.save()
# With cells, the best we can do right now is commit the reservations
# immediately...
if CONF.cells.enable:
quotas.commit()
self._record_action_start(context, instance,
instance_actions.CONFIRM_RESIZE)
self.compute_rpcapi.confirm_resize(context,
instance,
migration,
migration.source_compute,
quotas.reservations or [])
@staticmethod
def _resize_cells_support(context, quotas, instance,
current_instance_type, new_instance_type):
"""Special API cell logic for resize."""
# With cells, the best we can do right now is commit the
# reservations immediately...
quotas.commit()
# NOTE(johannes/comstud): The API cell needs a local migration
# record for later resize_confirm and resize_reverts to deal
# with quotas. We don't need source and/or destination
# information, just the old and new flavors. Status is set to
# 'finished' since nothing else will update the status along
# the way.
mig = objects.Migration(context=context.elevated())
mig.instance_uuid = instance.uuid
mig.old_instance_type_id = current_instance_type['id']
mig.new_instance_type_id = new_instance_type['id']
mig.status = 'finished'
mig.migration_type = (
mig.old_instance_type_id != mig.new_instance_type_id and
'resize' or 'migration')
mig.create()
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED])
def resize(self, context, instance, flavor_id=None, clean_shutdown=True,
**extra_instance_updates):
"""Resize (ie, migrate) a running instance.
If flavor_id is None, the process is considered a migration, keeping
the original flavor_id. If flavor_id is not None, the instance should
be migrated to a new host and resized to the new flavor_id.
"""
self._check_auto_disk_config(instance, **extra_instance_updates)
current_instance_type = instance.get_flavor()
# If flavor_id is not provided, only migrate the instance.
if not flavor_id:
LOG.debug("flavor_id is None. Assuming migration.",
instance=instance)
new_instance_type = current_instance_type
else:
new_instance_type = flavors.get_flavor_by_flavor_id(
flavor_id, read_deleted="no")
if (new_instance_type.get('root_gb') == 0 and
current_instance_type.get('root_gb') != 0 and
not self.is_volume_backed_instance(context, instance)):
reason = _('Resize to zero disk flavor is not allowed.')
raise exception.CannotResizeDisk(reason=reason)
if not new_instance_type:
raise exception.FlavorNotFound(flavor_id=flavor_id)
current_instance_type_name = current_instance_type['name']
new_instance_type_name = new_instance_type['name']
LOG.debug("Old instance type %(current_instance_type_name)s, "
" new instance type %(new_instance_type_name)s",
{'current_instance_type_name': current_instance_type_name,
'new_instance_type_name': new_instance_type_name},
instance=instance)
same_instance_type = (current_instance_type['id'] ==
new_instance_type['id'])
# NOTE(sirp): We don't want to force a customer to change their flavor
# when Ops is migrating off of a failed host.
if not same_instance_type and new_instance_type.get('disabled'):
raise exception.FlavorNotFound(flavor_id=flavor_id)
if same_instance_type and flavor_id and self.cell_type != 'compute':
raise exception.CannotResizeToSameFlavor()
# ensure there is sufficient headroom for upsizes
if flavor_id:
deltas = compute_utils.upsize_quota_delta(context,
new_instance_type,
current_instance_type)
try:
quotas = compute_utils.reserve_quota_delta(context, deltas,
instance)
except exception.OverQuota as exc:
quotas = exc.kwargs['quotas']
overs = exc.kwargs['overs']
usages = exc.kwargs['usages']
headroom = self._get_headroom(quotas, usages, deltas)
(overs, reqs, total_alloweds,
useds) = self._get_over_quota_detail(headroom, overs, quotas,
deltas)
LOG.warning(_LW("%(overs)s quota exceeded for %(pid)s,"
" tried to resize instance."),
{'overs': overs, 'pid': context.project_id})
raise exception.TooManyInstances(overs=overs,
req=reqs,
used=useds,
allowed=total_alloweds)
else:
quotas = objects.Quotas(context=context)
instance.task_state = task_states.RESIZE_PREP
instance.progress = 0
instance.update(extra_instance_updates)
instance.save(expected_task_state=[None])
filter_properties = {'ignore_hosts': []}
if not CONF.allow_resize_to_same_host:
filter_properties['ignore_hosts'].append(instance.host)
if self.cell_type == 'api':
# Commit reservations early and create migration record.
self._resize_cells_support(context, quotas, instance,
current_instance_type,
new_instance_type)
if not flavor_id:
self._record_action_start(context, instance,
instance_actions.MIGRATE)
else:
self._record_action_start(context, instance,
instance_actions.RESIZE)
scheduler_hint = {'filter_properties': filter_properties}
self.compute_task_api.resize_instance(context, instance,
extra_instance_updates, scheduler_hint=scheduler_hint,
flavor=new_instance_type,
reservations=quotas.reservations or [],
clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.PAUSED, vm_states.SUSPENDED])
def shelve(self, context, instance, clean_shutdown=True):
"""Shelve an instance.
Shuts down an instance and frees it up to be removed from the
hypervisor.
"""
instance.task_state = task_states.SHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SHELVE)
if not self.is_volume_backed_instance(context, instance):
name = '%s-shelved' % instance.display_name
image_meta = self._create_image(context, instance, name,
'snapshot')
image_id = image_meta['id']
self.compute_rpcapi.shelve_instance(context, instance=instance,
image_id=image_id, clean_shutdown=clean_shutdown)
else:
self.compute_rpcapi.shelve_offload_instance(context,
instance=instance, clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED])
def shelve_offload(self, context, instance, clean_shutdown=True):
"""Remove a shelved instance from the hypervisor."""
instance.task_state = task_states.SHELVING_OFFLOADING
instance.save(expected_task_state=[None])
self.compute_rpcapi.shelve_offload_instance(context, instance=instance,
clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.SHELVED,
vm_states.SHELVED_OFFLOADED])
def unshelve(self, context, instance):
"""Restore a shelved instance."""
instance.task_state = task_states.UNSHELVING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNSHELVE)
self.compute_task_api.unshelve_instance(context, instance)
@wrap_check_policy
@check_instance_lock
def add_fixed_ip(self, context, instance, network_id):
"""Add fixed_ip from specified network to given instance."""
self.compute_rpcapi.add_fixed_ip_to_instance(context,
instance=instance, network_id=network_id)
@wrap_check_policy
@check_instance_lock
def remove_fixed_ip(self, context, instance, address):
"""Remove fixed_ip from specified network to given instance."""
self.compute_rpcapi.remove_fixed_ip_from_instance(context,
instance=instance, address=address)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def pause(self, context, instance):
"""Pause the given instance."""
instance.task_state = task_states.PAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.PAUSE)
self.compute_rpcapi.pause_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.PAUSED])
def unpause(self, context, instance):
"""Unpause the given instance."""
instance.task_state = task_states.UNPAUSING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNPAUSE)
self.compute_rpcapi.unpause_instance(context, instance)
@wrap_check_policy
def get_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_diagnostics(context, instance=instance)
@wrap_check_policy
def get_instance_diagnostics(self, context, instance):
"""Retrieve diagnostics for the given instance."""
return self.compute_rpcapi.get_instance_diagnostics(context,
instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def suspend(self, context, instance):
"""Suspend the given instance."""
instance.task_state = task_states.SUSPENDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.SUSPEND)
self.compute_rpcapi.suspend_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.SUSPENDED])
def resume(self, context, instance):
"""Resume the given instance."""
instance.task_state = task_states.RESUMING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESUME)
self.compute_rpcapi.resume_instance(context, instance)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def rescue(self, context, instance, rescue_password=None,
rescue_image_ref=None, clean_shutdown=True):
"""Rescue the given instance."""
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
for bdm in bdms:
if bdm.volume_id:
vol = self.volume_api.get(context, bdm.volume_id)
self.volume_api.check_attached(context, vol)
if self.is_volume_backed_instance(context, instance, bdms):
reason = _("Cannot rescue a volume-backed instance")
raise exception.InstanceNotRescuable(instance_id=instance.uuid,
reason=reason)
instance.task_state = task_states.RESCUING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.RESCUE)
self.compute_rpcapi.rescue_instance(context, instance=instance,
rescue_password=rescue_password, rescue_image_ref=rescue_image_ref,
clean_shutdown=clean_shutdown)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.RESCUED])
def unrescue(self, context, instance):
"""Unrescue the given instance."""
instance.task_state = task_states.UNRESCUING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.UNRESCUE)
self.compute_rpcapi.unrescue_instance(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE])
def set_admin_password(self, context, instance, password=None):
"""Set the root/admin password for the given instance.
@param context: Nova auth context.
@param instance: Nova instance object.
@param password: The admin password for the instance.
"""
instance.task_state = task_states.UPDATING_PASSWORD
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.CHANGE_PASSWORD)
self.compute_rpcapi.set_admin_password(context,
instance=instance,
new_pass=password)
@wrap_check_policy
@check_instance_host
def get_vnc_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_vnc_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_vnc_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_spice_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_spice_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_spice_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_rdp_console(self, context, instance, console_type):
"""Get a url to an instance Console."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_rdp_connect_info(self, context, instance, console_type):
"""Used in a child cell to get console info."""
connect_info = self.compute_rpcapi.get_rdp_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_serial_console(self, context, instance, console_type):
"""Get a url to a serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@check_instance_host
def get_serial_console_connect_info(self, context, instance, console_type):
"""Used in a child cell to get serial console."""
connect_info = self.compute_rpcapi.get_serial_console(context,
instance=instance, console_type=console_type)
return connect_info
@wrap_check_policy
@check_instance_host
def get_mks_console(self, context, instance, console_type):
"""Get a url to a MKS console."""
connect_info = self.compute_rpcapi.get_mks_console(context,
instance=instance, console_type=console_type)
self.consoleauth_rpcapi.authorize_console(context,
connect_info['token'], console_type,
connect_info['host'], connect_info['port'],
connect_info['internal_access_path'], instance.uuid,
access_url=connect_info['access_url'])
return {'url': connect_info['access_url']}
@wrap_check_policy
@check_instance_host
def get_console_output(self, context, instance, tail_length=None):
"""Get console output for an instance."""
return self.compute_rpcapi.get_console_output(context,
instance=instance, tail_length=tail_length)
@wrap_check_policy
def lock(self, context, instance):
"""Lock the given instance."""
# Only update the lock if we are an admin (non-owner)
is_owner = instance.project_id == context.project_id
if instance.locked and is_owner:
return
context = context.elevated()
LOG.debug('Locking', context=context, instance=instance)
instance.locked = True
instance.locked_by = 'owner' if is_owner else 'admin'
instance.save()
def is_expected_locked_by(self, context, instance):
is_owner = instance.project_id == context.project_id
expect_locked_by = 'owner' if is_owner else 'admin'
locked_by = instance.locked_by
if locked_by and locked_by != expect_locked_by:
return False
return True
@wrap_check_policy
def unlock(self, context, instance):
"""Unlock the given instance."""
# If the instance was locked by someone else, check
# that we're allowed to override the lock
if not self.skip_policy_check and not self.is_expected_locked_by(
context, instance):
check_policy(context, 'unlock_override', instance)
context = context.elevated()
LOG.debug('Unlocking', context=context, instance=instance)
instance.locked = False
instance.locked_by = None
instance.save()
@wrap_check_policy
def get_lock(self, context, instance):
"""Return the boolean state of given instance's lock."""
return self.get(context, instance.uuid)['locked']
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def reset_network(self, context, instance):
"""Reset networking on the instance."""
self.compute_rpcapi.reset_network(context, instance=instance)
@wrap_check_policy
@check_instance_lock
@check_instance_cell
def inject_network_info(self, context, instance):
"""Inject network info for the instance."""
self.compute_rpcapi.inject_network_info(context, instance=instance)
def _attach_volume(self, context, instance, volume_id, device,
disk_bus, device_type):
"""Attach an existing volume to an existing instance.
This method is separated to make it possible for cells version
to override it.
"""
# NOTE(vish): This is done on the compute host because we want
# to avoid a race where two devices are requested at
# the same time. When db access is removed from
# compute, the bdm will be created here and we will
# have to make sure that they are assigned atomically.
volume_bdm = self.compute_rpcapi.reserve_block_device_name(
context, instance, device, volume_id, disk_bus=disk_bus,
device_type=device_type)
try:
volume = self.volume_api.get(context, volume_id)
self.volume_api.check_attach(context, volume, instance=instance)
self.volume_api.reserve_volume(context, volume_id)
self.compute_rpcapi.attach_volume(context, instance, volume_bdm)
except Exception:
with excutils.save_and_reraise_exception():
volume_bdm.destroy()
return volume_bdm.device_name
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
vm_states.SOFT_DELETED])
def attach_volume(self, context, instance, volume_id, device=None,
disk_bus=None, device_type=None):
"""Attach an existing volume to an existing instance."""
# NOTE(vish): Fail fast if the device is not going to pass. This
# will need to be removed along with the test if we
# change the logic in the manager for what constitutes
# a valid device.
if device and not block_device.match_device(device):
raise exception.InvalidDevicePath(path=device)
return self._attach_volume(context, instance, volume_id, device,
disk_bus, device_type)
def _detach_volume(self, context, instance, volume):
"""Detach volume from instance.
This method is separated to make it easier for cells version
to override.
"""
self.volume_api.check_detach(context, volume)
self.volume_api.begin_detaching(context, volume['id'])
self.compute_rpcapi.detach_volume(context, instance=instance,
volume_id=volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED, vm_states.RESIZED,
vm_states.SOFT_DELETED])
def detach_volume(self, context, instance, volume):
"""Detach a volume from an instance."""
if volume['attach_status'] == 'detached':
msg = _("Volume must be attached in order to detach.")
raise exception.InvalidVolume(reason=msg)
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if volume['instance_uuid'] != instance.uuid:
raise exception.VolumeUnattached(volume_id=volume['id'])
self._detach_volume(context, instance, volume)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED,
vm_states.RESIZED, vm_states.SOFT_DELETED])
def swap_volume(self, context, instance, old_volume, new_volume):
"""Swap volume attached to an instance."""
if old_volume['attach_status'] == 'detached':
raise exception.VolumeUnattached(volume_id=old_volume['id'])
# The caller likely got the instance from volume['instance_uuid']
# in the first place, but let's sanity check.
if old_volume['instance_uuid'] != instance.uuid:
msg = _("Old volume is attached to a different instance.")
raise exception.InvalidVolume(reason=msg)
if new_volume['attach_status'] == 'attached':
msg = _("New volume must be detached in order to swap.")
raise exception.InvalidVolume(reason=msg)
if int(new_volume['size']) < int(old_volume['size']):
msg = _("New volume must be the same size or larger.")
raise exception.InvalidVolume(reason=msg)
self.volume_api.check_detach(context, old_volume)
self.volume_api.check_attach(context, new_volume, instance=instance)
self.volume_api.begin_detaching(context, old_volume['id'])
self.volume_api.reserve_volume(context, new_volume['id'])
try:
self.compute_rpcapi.swap_volume(
context, instance=instance,
old_volume_id=old_volume['id'],
new_volume_id=new_volume['id'])
except Exception:
with excutils.save_and_reraise_exception():
self.volume_api.roll_detaching(context, old_volume['id'])
self.volume_api.unreserve_volume(context, new_volume['id'])
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED],
task_state=[None])
def attach_interface(self, context, instance, network_id, port_id,
requested_ip):
"""Use hotplug to add an network adapter to an instance."""
return self.compute_rpcapi.attach_interface(context,
instance=instance, network_id=network_id, port_id=port_id,
requested_ip=requested_ip)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.STOPPED],
task_state=[None])
def detach_interface(self, context, instance, port_id):
"""Detach an network adapter from an instance."""
self.compute_rpcapi.detach_interface(context, instance=instance,
port_id=port_id)
@wrap_check_policy
def get_instance_metadata(self, context, instance):
"""Get all metadata associated with an instance."""
return self.db.instance_metadata_get(context, instance.uuid)
def get_all_instance_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='metadata')
def get_all_system_metadata(self, context, search_filts):
return self._get_all_instance_metadata(
context, search_filts, metadata_type='system_metadata')
def _get_all_instance_metadata(self, context, search_filts, metadata_type):
"""Get all metadata."""
instances = self._get_instances_by_filters(context, filters={},
sort_keys=['created_at'],
sort_dirs=['desc'])
for instance in instances:
try:
check_policy(context, 'get_all_instance_%s' % metadata_type,
instance)
except exception.PolicyNotAuthorized:
# failed policy check - not allowed to
# read this metadata
continue
return utils.filter_and_format_resource_metadata('instance', instances,
search_filts, metadata_type)
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def delete_instance_metadata(self, context, instance, key):
"""Delete the given metadata item from an instance."""
instance.delete_metadata_key(key)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff={key: ['-']})
@wrap_check_policy
@check_instance_lock
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED,
vm_states.SUSPENDED, vm_states.STOPPED],
task_state=None)
def update_instance_metadata(self, context, instance,
metadata, delete=False):
"""Updates or creates instance metadata.
If delete is True, metadata items that are not specified in the
`metadata` argument will be deleted.
"""
orig = dict(instance.metadata)
if delete:
_metadata = metadata
else:
_metadata = dict(instance.metadata)
_metadata.update(metadata)
self._check_metadata_properties_quota(context, _metadata)
instance.metadata = _metadata
instance.save()
diff = _diff_dict(orig, instance.metadata)
self.compute_rpcapi.change_instance_metadata(context,
instance=instance,
diff=diff)
return _metadata
def get_instance_faults(self, context, instances):
"""Get all faults for a list of instance uuids."""
if not instances:
return {}
for instance in instances:
check_policy(context, 'get_instance_faults', instance)
uuids = [instance.uuid for instance in instances]
return self.db.instance_fault_get_by_instance_uuids(context, uuids)
def _get_root_bdm(self, context, instance, bdms=None):
if bdms is None:
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
context, instance.uuid)
return bdms.root_bdm()
def is_volume_backed_instance(self, context, instance, bdms=None):
if not instance.image_ref:
return True
root_bdm = self._get_root_bdm(context, instance, bdms)
if not root_bdm:
return False
return root_bdm.is_volume
@check_instance_lock
@check_instance_cell
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.PAUSED])
def live_migrate(self, context, instance, block_migration,
disk_over_commit, host_name):
"""Migrate a server lively to a new host."""
LOG.debug("Going to try to live migrate instance to %s",
host_name or "another host", instance=instance)
instance.task_state = task_states.MIGRATING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance,
instance_actions.LIVE_MIGRATION)
self.compute_task_api.live_migrate_instance(context, instance,
host_name, block_migration=block_migration,
disk_over_commit=disk_over_commit)
@check_instance_state(vm_state=[vm_states.ACTIVE, vm_states.STOPPED,
vm_states.ERROR])
def evacuate(self, context, instance, host, on_shared_storage,
admin_password=None):
"""Running evacuate to target host.
Checking vm compute host state, if the host not in expected_state,
raising an exception.
:param instance: The instance to evacuate
:param host: Target host. if not set, the scheduler will pick up one
:param on_shared_storage: True if instance files on shared storage
:param admin_password: password to set on rebuilt instance
"""
LOG.debug('vm evacuation scheduled', instance=instance)
inst_host = instance.host
service = objects.Service.get_by_compute_host(context, inst_host)
if self.servicegroup_api.service_is_up(service):
LOG.error(_LE('Instance compute service state on %s '
'expected to be down, but it was up.'), inst_host)
raise exception.ComputeServiceInUse(host=inst_host)
instance.task_state = task_states.REBUILDING
instance.save(expected_task_state=[None])
self._record_action_start(context, instance, instance_actions.EVACUATE)
# NOTE(danms): Create this as a tombstone for the source compute
# to find and cleanup. No need to pass it anywhere else.
migration = objects.Migration(context,
source_compute=instance.host,
source_node=instance.node,
instance_uuid=instance.uuid,
status='accepted',
migration_type='evacuation')
if host:
migration.dest_compute = host
migration.create()
compute_utils.notify_about_instance_usage(
self.notifier, context, instance, "evacuate")
return self.compute_task_api.rebuild_instance(context,
instance=instance,
new_pass=admin_password,
injected_files=None,
image_ref=None,
orig_image_ref=None,
orig_sys_metadata=None,
bdms=None,
recreate=True,
on_shared_storage=on_shared_storage,
host=host)
def get_migrations(self, context, filters):
"""Get all migrations for the given filters."""
return objects.MigrationList.get_by_filters(context, filters)
@wrap_check_policy
def volume_snapshot_create(self, context, volume_id, create_info):
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id, expected_attrs=['instance'])
self.compute_rpcapi.volume_snapshot_create(context, bdm.instance,
volume_id, create_info)
snapshot = {
'snapshot': {
'id': create_info.get('id'),
'volumeId': volume_id
}
}
return snapshot
@wrap_check_policy
def volume_snapshot_delete(self, context, volume_id, snapshot_id,
delete_info):
bdm = objects.BlockDeviceMapping.get_by_volume_id(
context, volume_id, expected_attrs=['instance'])
self.compute_rpcapi.volume_snapshot_delete(context, bdm.instance,
volume_id, snapshot_id, delete_info)
def external_instance_event(self, context, instances, events):
# NOTE(danms): The external API consumer just provides events,
# but doesn't know where they go. We need to collate lists
# by the host the affected instance is on and dispatch them
# according to host
instances_by_host = {}
events_by_host = {}
hosts_by_instance = {}
for instance in instances:
instances_on_host = instances_by_host.get(instance.host, [])
instances_on_host.append(instance)
instances_by_host[instance.host] = instances_on_host
hosts_by_instance[instance.uuid] = instance.host
for event in events:
host = hosts_by_instance[event.instance_uuid]
events_on_host = events_by_host.get(host, [])
events_on_host.append(event)
events_by_host[host] = events_on_host
for host in instances_by_host:
# TODO(salv-orlando): Handle exceptions raised by the rpc api layer
# in order to ensure that a failure in processing events on a host
# will not prevent processing events on other hosts
self.compute_rpcapi.external_instance_event(
context, instances_by_host[host], events_by_host[host])
class HostAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host operations."""
def __init__(self, rpcapi=None):
self.rpcapi = rpcapi or compute_rpcapi.ComputeAPI()
self.servicegroup_api = servicegroup.API()
super(HostAPI, self).__init__()
def _assert_host_exists(self, context, host_name, must_be_up=False):
"""Raise HostNotFound if compute host doesn't exist."""
service = objects.Service.get_by_compute_host(context, host_name)
if not service:
raise exception.HostNotFound(host=host_name)
if must_be_up and not self.servicegroup_api.service_is_up(service):
raise exception.ComputeServiceUnavailable(host=host_name)
return service['host']
@wrap_exception()
def set_host_enabled(self, context, host_name, enabled):
"""Sets the specified host's ability to accept new instances."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'enabled': enabled}
compute_utils.notify_about_host_update(context,
'set_enabled.start',
payload)
result = self.rpcapi.set_host_enabled(context, enabled=enabled,
host=host_name)
compute_utils.notify_about_host_update(context,
'set_enabled.end',
payload)
return result
def get_host_uptime(self, context, host_name):
"""Returns the result of calling "uptime" on the target host."""
host_name = self._assert_host_exists(context, host_name,
must_be_up=True)
return self.rpcapi.get_host_uptime(context, host=host_name)
@wrap_exception()
def host_power_action(self, context, host_name, action):
"""Reboots, shuts down or powers up the host."""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'action': action}
compute_utils.notify_about_host_update(context,
'power_action.start',
payload)
result = self.rpcapi.host_power_action(context, action=action,
host=host_name)
compute_utils.notify_about_host_update(context,
'power_action.end',
payload)
return result
@wrap_exception()
def set_host_maintenance(self, context, host_name, mode):
"""Start/Stop host maintenance window. On start, it triggers
guest VMs evacuation.
"""
host_name = self._assert_host_exists(context, host_name)
payload = {'host_name': host_name, 'mode': mode}
compute_utils.notify_about_host_update(context,
'set_maintenance.start',
payload)
result = self.rpcapi.host_maintenance_mode(context,
host_param=host_name, mode=mode, host=host_name)
compute_utils.notify_about_host_update(context,
'set_maintenance.end',
payload)
return result
def service_get_all(self, context, filters=None, set_zones=False):
"""Returns a list of services, optionally filtering the results.
If specified, 'filters' should be a dictionary containing services
attributes and matching values. Ie, to get a list of services for
the 'compute' topic, use filters={'topic': 'compute'}.
"""
if filters is None:
filters = {}
disabled = filters.pop('disabled', None)
if 'availability_zone' in filters:
set_zones = True
services = objects.ServiceList.get_all(context, disabled,
set_zones=set_zones)
ret_services = []
for service in services:
for key, val in six.iteritems(filters):
if service[key] != val:
break
else:
# All filters matched.
ret_services.append(service)
return ret_services
def service_get_by_compute_host(self, context, host_name):
"""Get service entry for the given compute hostname."""
return objects.Service.get_by_compute_host(context, host_name)
def _service_update(self, context, host_name, binary, params_to_update):
"""Performs the actual service update operation."""
service = objects.Service.get_by_args(context, host_name, binary)
service.update(params_to_update)
service.save()
return service
def service_update(self, context, host_name, binary, params_to_update):
"""Enable / Disable a service.
For compute services, this stops new builds and migrations going to
the host.
"""
return self._service_update(context, host_name, binary,
params_to_update)
def _service_delete(self, context, service_id):
"""Performs the actual Service deletion operation."""
objects.Service.get_by_id(context, service_id).destroy()
def service_delete(self, context, service_id):
"""Deletes the specified service."""
self._service_delete(context, service_id)
def instance_get_all_by_host(self, context, host_name):
"""Return all instances on the given host."""
return objects.InstanceList.get_by_host(context, host_name)
def task_log_get_all(self, context, task_name, period_beginning,
period_ending, host=None, state=None):
"""Return the task logs within a given range, optionally
filtering by host and/or state.
"""
return self.db.task_log_get_all(context, task_name,
period_beginning,
period_ending,
host=host,
state=state)
def compute_node_get(self, context, compute_id):
"""Return compute node entry for particular integer ID."""
return objects.ComputeNode.get_by_id(context, int(compute_id))
def compute_node_get_all(self, context):
return objects.ComputeNodeList.get_all(context)
def compute_node_search_by_hypervisor(self, context, hypervisor_match):
return objects.ComputeNodeList.get_by_hypervisor(context,
hypervisor_match)
def compute_node_statistics(self, context):
return self.db.compute_node_statistics(context)
class InstanceActionAPI(base.Base):
"""Sub-set of the Compute Manager API for managing instance actions."""
def actions_get(self, context, instance):
return objects.InstanceActionList.get_by_instance_uuid(
context, instance.uuid)
def action_get_by_request_id(self, context, instance, request_id):
return objects.InstanceAction.get_by_request_id(
context, instance.uuid, request_id)
def action_events_get(self, context, instance, action_id):
return objects.InstanceActionEventList.get_by_action(
context, action_id)
class AggregateAPI(base.Base):
"""Sub-set of the Compute Manager API for managing host aggregates."""
def __init__(self, **kwargs):
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
self.scheduler_client = scheduler_client.SchedulerClient()
super(AggregateAPI, self).__init__(**kwargs)
@wrap_exception()
def create_aggregate(self, context, aggregate_name, availability_zone):
"""Creates the model for the aggregate."""
aggregate = objects.Aggregate(context=context)
aggregate.name = aggregate_name
if availability_zone:
aggregate.metadata = {'availability_zone': availability_zone}
aggregate.create()
self.scheduler_client.update_aggregates(context, [aggregate])
return aggregate
def get_aggregate(self, context, aggregate_id):
"""Get an aggregate by id."""
return objects.Aggregate.get_by_id(context, aggregate_id)
def get_aggregate_list(self, context):
"""Get all the aggregates."""
return objects.AggregateList.get_all(context)
@wrap_exception()
def update_aggregate(self, context, aggregate_id, values):
"""Update the properties of an aggregate."""
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
if 'name' in values:
aggregate.name = values.pop('name')
aggregate.save()
self.is_safe_to_update_az(context, values, aggregate=aggregate,
action_name=AGGREGATE_ACTION_UPDATE)
if values:
aggregate.update_metadata(values)
self.scheduler_client.update_aggregates(context, [aggregate])
# If updated values include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if values.get('availability_zone'):
availability_zones.reset_cache()
return aggregate
@wrap_exception()
def update_aggregate_metadata(self, context, aggregate_id, metadata):
"""Updates the aggregate metadata."""
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
self.is_safe_to_update_az(context, metadata, aggregate=aggregate,
action_name=AGGREGATE_ACTION_UPDATE_META)
aggregate.update_metadata(metadata)
self.scheduler_client.update_aggregates(context, [aggregate])
# If updated metadata include availability_zones, then the cache
# which stored availability_zones and host need to be reset
if metadata and metadata.get('availability_zone'):
availability_zones.reset_cache()
return aggregate
@wrap_exception()
def delete_aggregate(self, context, aggregate_id):
"""Deletes the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id}
compute_utils.notify_about_aggregate_update(context,
"delete.start",
aggregate_payload)
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
if len(aggregate.hosts) > 0:
msg = _("Host aggregate is not empty")
raise exception.InvalidAggregateActionDelete(
aggregate_id=aggregate_id, reason=msg)
aggregate.destroy()
self.scheduler_client.delete_aggregate(context, aggregate)
compute_utils.notify_about_aggregate_update(context,
"delete.end",
aggregate_payload)
def is_safe_to_update_az(self, context, metadata, aggregate,
hosts=None,
action_name=AGGREGATE_ACTION_ADD):
"""Determine if updates alter an aggregate's availability zone.
:param context: local context
:param metadata: Target metadata for updating aggregate
:param aggregate: Aggregate to update
:param hosts: Hosts to check. If None, aggregate.hosts is used
:type hosts: list
:action_name: Calling method for logging purposes
"""
if 'availability_zone' in metadata:
_hosts = hosts or aggregate.hosts
host_aggregates = objects.AggregateList.get_by_metadata_key(
context, 'availability_zone', hosts=_hosts)
conflicting_azs = [
agg.availability_zone for agg in host_aggregates
if agg.availability_zone != metadata['availability_zone']
and agg.id != aggregate.id]
if conflicting_azs:
msg = _("One or more hosts already in availability zone(s) "
"%s") % conflicting_azs
self._raise_invalid_aggregate_exc(action_name, aggregate.id,
msg)
def _raise_invalid_aggregate_exc(self, action_name, aggregate_id, reason):
if action_name == AGGREGATE_ACTION_ADD:
raise exception.InvalidAggregateActionAdd(
aggregate_id=aggregate_id, reason=reason)
elif action_name == AGGREGATE_ACTION_UPDATE:
raise exception.InvalidAggregateActionUpdate(
aggregate_id=aggregate_id, reason=reason)
elif action_name == AGGREGATE_ACTION_UPDATE_META:
raise exception.InvalidAggregateActionUpdateMeta(
aggregate_id=aggregate_id, reason=reason)
elif action_name == AGGREGATE_ACTION_DELETE:
raise exception.InvalidAggregateActionDelete(
aggregate_id=aggregate_id, reason=reason)
raise exception.NovaException(
_("Unexpected aggregate action %s") % action_name)
def _update_az_cache_for_host(self, context, host_name, aggregate_meta):
# Update the availability_zone cache to avoid getting wrong
# availability_zone in cache retention time when add/remove
# host to/from aggregate.
if aggregate_meta and aggregate_meta.get('availability_zone'):
availability_zones.update_host_availability_zone_cache(context,
host_name)
@wrap_exception()
def add_host_to_aggregate(self, context, aggregate_id, host_name):
"""Adds the host to an aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"addhost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
objects.Service.get_by_compute_host(context, host_name)
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
self.is_safe_to_update_az(context, aggregate.metadata,
hosts=[host_name], aggregate=aggregate)
aggregate.add_host(host_name)
self.scheduler_client.update_aggregates(context, [aggregate])
self._update_az_cache_for_host(context, host_name, aggregate.metadata)
# NOTE(jogo): Send message to host to support resource pools
self.compute_rpcapi.add_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
aggregate_payload.update({'name': aggregate['name']})
compute_utils.notify_about_aggregate_update(context,
"addhost.end",
aggregate_payload)
return aggregate
@wrap_exception()
def remove_host_from_aggregate(self, context, aggregate_id, host_name):
"""Removes host from the aggregate."""
aggregate_payload = {'aggregate_id': aggregate_id,
'host_name': host_name}
compute_utils.notify_about_aggregate_update(context,
"removehost.start",
aggregate_payload)
# validates the host; ComputeHostNotFound is raised if invalid
objects.Service.get_by_compute_host(context, host_name)
aggregate = objects.Aggregate.get_by_id(context, aggregate_id)
aggregate.delete_host(host_name)
self.scheduler_client.update_aggregates(context, [aggregate])
self._update_az_cache_for_host(context, host_name, aggregate.metadata)
self.compute_rpcapi.remove_aggregate_host(context,
aggregate=aggregate, host_param=host_name, host=host_name)
compute_utils.notify_about_aggregate_update(context,
"removehost.end",
aggregate_payload)
return aggregate
class KeypairAPI(base.Base):
"""Subset of the Compute Manager API for managing key pairs."""
get_notifier = functools.partial(rpc.get_notifier, service='api')
wrap_exception = functools.partial(exception.wrap_exception,
get_notifier=get_notifier)
def _notify(self, context, event_suffix, keypair_name):
payload = {
'tenant_id': context.project_id,
'user_id': context.user_id,
'key_name': keypair_name,
}
notify = self.get_notifier()
notify.info(context, 'keypair.%s' % event_suffix, payload)
def _validate_new_key_pair(self, context, user_id, key_name, key_type):
safe_chars = "_- " + string.digits + string.ascii_letters
clean_value = "".join(x for x in key_name if x in safe_chars)
if clean_value != key_name:
raise exception.InvalidKeypair(
reason=_("Keypair name contains unsafe characters"))
try:
utils.check_string_length(key_name, min_length=1, max_length=255)
except exception.InvalidInput:
raise exception.InvalidKeypair(
reason=_('Keypair name must be string and between '
'1 and 255 characters long'))
count = objects.Quotas.count(context, 'key_pairs', user_id)
try:
objects.Quotas.limit_check(context, key_pairs=count + 1)
except exception.OverQuota:
raise exception.KeypairLimitExceeded()
@wrap_exception()
def import_key_pair(self, context, user_id, key_name, public_key,
key_type=keypair_obj.KEYPAIR_TYPE_SSH):
"""Import a key pair using an existing public key."""
self._validate_new_key_pair(context, user_id, key_name, key_type)
self._notify(context, 'import.start', key_name)
fingerprint = self._generate_fingerprint(public_key, key_type)
keypair = objects.KeyPair(context)
keypair.user_id = user_id
keypair.name = key_name
keypair.type = key_type
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create()
self._notify(context, 'import.end', key_name)
return keypair
@wrap_exception()
def create_key_pair(self, context, user_id, key_name,
key_type=keypair_obj.KEYPAIR_TYPE_SSH):
"""Create a new key pair."""
self._validate_new_key_pair(context, user_id, key_name, key_type)
self._notify(context, 'create.start', key_name)
private_key, public_key, fingerprint = self._generate_key_pair(
user_id, key_type)
keypair = objects.KeyPair(context)
keypair.user_id = user_id
keypair.name = key_name
keypair.type = key_type
keypair.fingerprint = fingerprint
keypair.public_key = public_key
keypair.create()
self._notify(context, 'create.end', key_name)
return keypair, private_key
def _generate_fingerprint(self, public_key, key_type):
if key_type == keypair_obj.KEYPAIR_TYPE_SSH:
return crypto.generate_fingerprint(public_key)
elif key_type == keypair_obj.KEYPAIR_TYPE_X509:
return crypto.generate_x509_fingerprint(public_key)
def _generate_key_pair(self, user_id, key_type):
if key_type == keypair_obj.KEYPAIR_TYPE_SSH:
return crypto.generate_key_pair()
elif key_type == keypair_obj.KEYPAIR_TYPE_X509:
return crypto.generate_winrm_x509_cert(user_id)
@wrap_exception()
def delete_key_pair(self, context, user_id, key_name):
"""Delete a keypair by name."""
self._notify(context, 'delete.start', key_name)
objects.KeyPair.destroy_by_name(context, user_id, key_name)
self._notify(context, 'delete.end', key_name)
def get_key_pairs(self, context, user_id):
"""List key pairs."""
return objects.KeyPairList.get_by_user(context, user_id)
def get_key_pair(self, context, user_id, key_name):
"""Get a keypair by name."""
return objects.KeyPair.get_by_name(context, user_id, key_name)
class SecurityGroupAPI(base.Base, security_group_base.SecurityGroupBase):
"""Sub-set of the Compute API related to managing security groups
and security group rules
"""
# The nova security group api does not use a uuid for the id.
id_is_uuid = False
def __init__(self, skip_policy_check=False, **kwargs):
super(SecurityGroupAPI, self).__init__(**kwargs)
self.skip_policy_check = skip_policy_check
self.compute_rpcapi = compute_rpcapi.ComputeAPI()
def validate_property(self, value, property, allowed):
"""Validate given security group property.
:param value: the value to validate, as a string or unicode
:param property: the property, either 'name' or 'description'
:param allowed: the range of characters allowed
"""
try:
val = value.strip()
except AttributeError:
msg = _("Security group %s is not a string or unicode") % property
self.raise_invalid_property(msg)
utils.check_string_length(val, name=property, min_length=1,
max_length=255)
if allowed and not re.match(allowed, val):
# Some validation to ensure that values match API spec.
# - Alphanumeric characters, spaces, dashes, and underscores.
# TODO(Daviey): LP: #813685 extend beyond group_name checking, and
# probably create a param validator that can be used elsewhere.
msg = (_("Value (%(value)s) for parameter Group%(property)s is "
"invalid. Content limited to '%(allowed)s'.") %
{'value': value, 'allowed': allowed,
'property': property.capitalize()})
self.raise_invalid_property(msg)
def ensure_default(self, context):
"""Ensure that a context has a security group.
Creates a security group for the security context if it does not
already exist.
:param context: the security context
"""
self.db.security_group_ensure_default(context)
def create_security_group(self, context, name, description):
quotas = objects.Quotas(context=context)
try:
quotas.reserve(security_groups=1)
except exception.OverQuota:
msg = _("Quota exceeded, too many security groups.")
self.raise_over_quota(msg)
LOG.info(_LI("Create Security Group %s"), name, context=context)
try:
self.ensure_default(context)
group = {'user_id': context.user_id,
'project_id': context.project_id,
'name': name,
'description': description}
try:
group_ref = self.db.security_group_create(context, group)
except exception.SecurityGroupExists:
msg = _('Security group %s already exists') % name
self.raise_group_already_exists(msg)
# Commit the reservation
quotas.commit()
except Exception:
with excutils.save_and_reraise_exception():
quotas.rollback()
return group_ref
def update_security_group(self, context, security_group,
name, description):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = (_("Unable to update system group '%s'") %
security_group['name'])
self.raise_invalid_group(msg)
group = {'name': name,
'description': description}
columns_to_join = ['rules.grantee_group']
group_ref = self.db.security_group_update(context,
security_group['id'],
group,
columns_to_join=columns_to_join)
return group_ref
def get(self, context, name=None, id=None, map_exception=False):
self.ensure_default(context)
try:
if name:
return self.db.security_group_get_by_name(context,
context.project_id,
name)
elif id:
return self.db.security_group_get(context, id)
except exception.NotFound as exp:
if map_exception:
msg = exp.format_message()
self.raise_not_found(msg)
else:
raise
def list(self, context, names=None, ids=None, project=None,
search_opts=None):
self.ensure_default(context)
groups = []
if names or ids:
if names:
for name in names:
groups.append(self.db.security_group_get_by_name(context,
project,
name))
if ids:
for id in ids:
groups.append(self.db.security_group_get(context, id))
elif context.is_admin:
# TODO(eglynn): support a wider set of search options than just
# all_tenants, at least include the standard filters defined for
# the EC2 DescribeSecurityGroups API for the non-admin case also
if (search_opts and 'all_tenants' in search_opts):
groups = self.db.security_group_get_all(context)
else:
groups = self.db.security_group_get_by_project(context,
project)
elif project:
groups = self.db.security_group_get_by_project(context, project)
return groups
def destroy(self, context, security_group):
if security_group['name'] in RO_SECURITY_GROUPS:
msg = _("Unable to delete system group '%s'") % \
security_group['name']
self.raise_invalid_group(msg)
if self.db.security_group_in_use(context, security_group['id']):
msg = _("Security group is still in use")
self.raise_invalid_group(msg)
quotas = objects.Quotas(context=context)
quota_project, quota_user = quotas_obj.ids_from_security_group(
context, security_group)
try:
quotas.reserve(project_id=quota_project,
user_id=quota_user, security_groups=-1)
except Exception:
LOG.exception(_LE("Failed to update usages deallocating "
"security group"))
LOG.info(_LI("Delete security group %s"), security_group['name'],
context=context)
self.db.security_group_destroy(context, security_group['id'])
# Commit the reservations
quotas.commit()
def is_associated_with_server(self, security_group, instance_uuid):
"""Check if the security group is already associated
with the instance. If Yes, return True.
"""
if not security_group:
return False
instances = security_group.get('instances')
if not instances:
return False
for inst in instances:
if (instance_uuid == inst['uuid']):
return True
return False
@wrap_check_security_groups_policy
def add_to_instance(self, context, instance, security_group_name):
"""Add security group to the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance.uuid
# check if the security group is associated with the server
if self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_add_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.compute_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance.host)
@wrap_check_security_groups_policy
def remove_from_instance(self, context, instance, security_group_name):
"""Remove the security group associated with the instance."""
security_group = self.db.security_group_get_by_name(context,
context.project_id,
security_group_name)
instance_uuid = instance.uuid
# check if the security group is associated with the server
if not self.is_associated_with_server(security_group, instance_uuid):
raise exception.SecurityGroupNotExistsForInstance(
security_group_id=security_group['id'],
instance_id=instance_uuid)
self.db.instance_remove_security_group(context.elevated(),
instance_uuid,
security_group['id'])
# NOTE(comstud): No instance_uuid argument to this compute manager
# call
self.compute_rpcapi.refresh_security_group_rules(context,
security_group['id'], host=instance.host)
def get_rule(self, context, id):
self.ensure_default(context)
try:
return self.db.security_group_rule_get(context, id)
except exception.NotFound:
msg = _("Rule (%s) not found") % id
self.raise_not_found(msg)
def add_rules(self, context, id, name, vals):
"""Add security group rule(s) to security group.
Note: the Nova security group API doesn't support adding multiple
security group rules at once but the EC2 one does. Therefore,
this function is written to support both.
"""
count = objects.Quotas.count(context, 'security_group_rules', id)
try:
projected = count + len(vals)
objects.Quotas.limit_check(context, security_group_rules=projected)
except exception.OverQuota:
msg = _("Quota exceeded, too many security group rules.")
self.raise_over_quota(msg)
msg = _("Security group %(name)s added %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)")
rules = []
for v in vals:
rule = self.db.security_group_rule_create(context, v)
rules.append(rule)
LOG.info(msg, {'name': name,
'protocol': rule.protocol,
'from_port': rule.from_port,
'to_port': rule.to_port})
self.trigger_rules_refresh(context, id=id)
return rules
def remove_rules(self, context, security_group, rule_ids):
msg = _("Security group %(name)s removed %(protocol)s ingress "
"(%(from_port)s:%(to_port)s)")
for rule_id in rule_ids:
rule = self.get_rule(context, rule_id)
LOG.info(msg, {'name': security_group['name'],
'protocol': rule.protocol,
'from_port': rule.from_port,
'to_port': rule.to_port})
self.db.security_group_rule_destroy(context, rule_id)
# NOTE(vish): we removed some rules, so refresh
self.trigger_rules_refresh(context, id=security_group['id'])
def remove_default_rules(self, context, rule_ids):
for rule_id in rule_ids:
self.db.security_group_default_rule_destroy(context, rule_id)
def add_default_rules(self, context, vals):
rules = [self.db.security_group_default_rule_create(context, v)
for v in vals]
return rules
def default_rule_exists(self, context, values):
"""Indicates whether the specified rule values are already
defined in the default security group rules.
"""
for rule in self.db.security_group_default_rule_list(context):
keys = ('cidr', 'from_port', 'to_port', 'protocol')
for key in keys:
if rule.get(key) != values.get(key):
break
else:
return rule.get('id') or True
return False
def get_all_default_rules(self, context):
try:
rules = self.db.security_group_default_rule_list(context)
except Exception:
msg = 'cannot get default security group rules'
raise exception.SecurityGroupDefaultRuleNotFound(msg)
return rules
def get_default_rule(self, context, id):
return self.db.security_group_default_rule_get(context, id)
def validate_id(self, id):
try:
return int(id)
except ValueError:
msg = _("Security group id should be integer")
self.raise_invalid_property(msg)
def _refresh_instance_security_rules(self, context, instances):
for instance in instances:
if instance.host is not None:
self.compute_rpcapi.refresh_instance_security_rules(
context, instance.host, instance)
def trigger_rules_refresh(self, context, id):
"""Called when a rule is added to or removed from a security_group."""
instances = objects.InstanceList.get_by_security_group_id(context, id)
self._refresh_instance_security_rules(context, instances)
def trigger_members_refresh(self, context, group_ids):
"""Called when a security group gains a new or loses a member.
Sends an update request to each compute node for each instance for
which this is relevant.
"""
instances = objects.InstanceList.get_by_grantee_security_group_ids(
context, group_ids)
self._refresh_instance_security_rules(context, instances)
def get_instance_security_groups(self, context, instance_uuid,
detailed=False):
if detailed:
return self.db.security_group_get_by_instance(context,
instance_uuid)
instance = objects.Instance(uuid=instance_uuid)
groups = objects.SecurityGroupList.get_by_instance(context, instance)
return [{'name': group.name} for group in groups]
def populate_security_groups(self, instance, security_groups):
if not security_groups:
# Make sure it's an empty list and not None
security_groups = []
instance.security_groups = security_group_obj.make_secgroup_list(
security_groups)
|
{
"content_hash": "85f0ef9a18feacea2d6ee49d5f6e3698",
"timestamp": "",
"source": "github",
"line_count": 4186,
"max_line_length": 79,
"avg_line_length": 44.338509316770185,
"alnum_prop": 0.5761499129853827,
"repo_name": "j-carpentier/nova",
"id": "c74920ba78632c95075deaad2282d222609427ac",
"size": "186415",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/compute/api.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16740777"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "259647"
}
],
"symlink_target": ""
}
|
from django.db import models
def esCUITValida(cuit):
cuit = str(cuit)
cuit = cuit.replace("-", "")
cuit = cuit.replace(" ", "")
cuit = cuit.replace(".", "")
if len(cuit) != 11:
return False
if not cuit.isdigit():
return False
base = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]
aux = 0
for i in xrange(10):
aux += int(cuit[i]) * base[i]
aux = 11 - (aux % 11)
if aux == 11:
aux = 0
elif aux == 10:
aux = 9
if int(cuit[10]) == aux:
return True
else:
return False
class Institucion(models.Model):
ACTIVA = 1
ANUALDA = 2
nombre = models.CharField(max_length=100, null=False, blank=False)
cuil = models.CharField(max_length=13, null=False, blank=False)
direccion = models.CharField(max_length=100, null=False, blank=False)
estado = models.IntegerField(choices=[(ACTIVA, ACTIVA), (ANUALDA, ANUALDA)],
default=ACTIVA)
usuario_creador = models.CharField(max_length=30, default='admin')
fecha_creacion = models.DateTimeField(auto_now_add=True)
usuario_modificador = models.CharField(max_length=30,
default='admin')
fecha_modificacion = models.DateTimeField(auto_now=True)
def __str__(self):
return self.nombre
def set_nombre(self, nombre):
if nombre == '':
raise Exception("El nombre no puede estar vacío.")
self.nombre = nombre
def set_direccion(self, direccion):
if direccion == '':
raise Exception("La dirección no puede estar vacía.")
self.direccion = direccion
def set_cuil(self, cuil):
if not esCUITValida(cuil):
raise Exception("El cuil no es válido.")
self.cuil = cuil
|
{
"content_hash": "313255c2a0fc912684933781e1be5c5f",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 80,
"avg_line_length": 26.61111111111111,
"alnum_prop": 0.5391440501043842,
"repo_name": "yo-alan/horarios",
"id": "d428d7ab4026e18fd5cfac680a843d72d4c10901",
"size": "1944",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "perfil/objects/institucion.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "149802"
},
{
"name": "HTML",
"bytes": "259116"
},
{
"name": "JavaScript",
"bytes": "667431"
},
{
"name": "Python",
"bytes": "124373"
}
],
"symlink_target": ""
}
|
"""Div. Utilities."""
from __future__ import absolute_import, unicode_literals, print_function
from .encoding import default_encode
import sys
def emergency_dump_state(state, open_file=open, dump=None, stderr=None):
"""Dump message state to stdout or file."""
from pprint import pformat
from tempfile import mktemp
stderr = sys.stderr if stderr is None else stderr
if dump is None:
import pickle
dump = pickle.dump
persist = mktemp()
print('EMERGENCY DUMP STATE TO FILE -> {0} <-'.format(persist), # noqa
file=stderr)
fh = open_file(persist, 'w')
try:
try:
dump(state, fh, protocol=0)
except Exception as exc:
print( # noqa
'Cannot pickle state: {0!r}. Fallback to pformat.'.format(exc),
file=stderr,
)
fh.write(default_encode(pformat(state)))
finally:
fh.flush()
fh.close()
return persist
|
{
"content_hash": "f04be9c075bda86bbb33dc55c2433c3b",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 28.794117647058822,
"alnum_prop": 0.5965270684371808,
"repo_name": "mdworks2016/work_development",
"id": "db52048c8bb92e9884c0dd12d61b7171be0266a1",
"size": "979",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "Python/20_Third_Certification/venv/lib/python3.7/site-packages/kombu/utils/div.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "142"
},
{
"name": "Kotlin",
"bytes": "68744"
},
{
"name": "Python",
"bytes": "1080"
}
],
"symlink_target": ""
}
|
'''
Home of the code which knows how to actually *draw* a graph which
has been layed out.
'''
class GraphRenderer:
'''
Only a "renderer" knows how big each node will end up based on metadata
in the graph which is specific to the renderer.
All renderers should be able to set the (x,y) "size" tuple
'''
def __init__(self, graph):
self._vg_graph = graph
self._vg_layout = None
self._vg_rendering = False
self._vg_canvas_width = 0
self._vg_canvas_height = 0
def beginRender(self, width, height):
self._vg_rendering = True
self._vg_canvas_width = width
self._vg_canvas_height = height
def endRender(self):
self._vg_rendering = False
def setNodeSizes(self, graph):
'''
Calculate the sizes for each node based on graph metadata (or defaults)
'''
raise Exception('%s must implement setNodeSizes!' % self.__class__.__name__)
def renderNode(self, nid, ninfo, xpos, ypos):
'''
Render the given node at the specified position.
'''
raise Exception('%s must implement renderNode!' % self.__class__.__name__)
def renderEdge(self, eid, einfo, points):
'''
Render an edge in the graph by drawing lines between all the listed
points (as (x,y) tuples...)
'''
raise Exception('%s must implement renderEdge!' % self.__class__.__name__)
def renderGraph(self):
graph = self._vg_graph
width, height = graph.getMeta('size',(800,600))
self.beginRender(width, height)
# Render each of the nodes (except ghost nodes...)
for nid,ninfo in graph.getNodes():
if ninfo.get('ghost'):
continue
xpos, ypos = ninfo.get('position')
self.renderNode(nid, ninfo, xpos, ypos)
# Render the edges
for eid, fromid, toid, einfo in graph.getEdges():
points = einfo.get('edge_points')
if points != None:
self.renderEdge(eid, einfo, points)
self.endRender()
|
{
"content_hash": "02acd54c840642edddea26843dac1e5d",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 84,
"avg_line_length": 30.085714285714285,
"alnum_prop": 0.5826210826210826,
"repo_name": "HackerTool/vivisect",
"id": "2c4ef7cb850e544bff691fd3b6d724e4baacf46e",
"size": "2106",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "visgraph/renderers/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "167795"
},
{
"name": "CSS",
"bytes": "15980"
},
{
"name": "Makefile",
"bytes": "355"
},
{
"name": "Python",
"bytes": "11384786"
},
{
"name": "Shell",
"bytes": "476"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
# The default socket timeout, used by httplib to indicate that no timeout was
# specified by the user
from socket import _GLOBAL_DEFAULT_TIMEOUT
import time
from ..exceptions import TimeoutStateError
# A sentinel value to indicate that no timeout was specified by the user in
# urllib3
_Default = object()
def current_time():
"""
Retrieve the current time. This function is mocked out in unit testing.
"""
return time.time()
class Timeout(object):
""" Timeout configuration.
Timeouts can be defined as a default for a pool::
timeout = Timeout(connect=2.0, read=7.0)
http = PoolManager(timeout=timeout)
response = http.request('GET', 'http://example.com/')
Or per-request (which overrides the default for the pool)::
response = http.request('GET', 'http://example.com/', timeout=Timeout(10))
Timeouts can be disabled by setting all the parameters to ``None``::
no_timeout = Timeout(connect=None, read=None)
response = http.request('GET', 'http://example.com/, timeout=no_timeout)
:param total:
This combines the connect and read timeouts into one; the read timeout
will be set to the time leftover from the connect attempt. In the
event that both a connect timeout and a total are specified, or a read
timeout and a total are specified, the shorter timeout will be applied.
Defaults to None.
:type total: integer, float, or None
:param connect:
The maximum amount of time to wait for a connection attempt to a server
to succeed. Omitting the parameter will default the connect timeout to
the system default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout for connection attempts.
:type connect: integer, float, or None
:param read:
The maximum amount of time to wait between consecutive
read operations for a response from the server. Omitting
the parameter will default the read timeout to the system
default, probably `the global default timeout in socket.py
<http://hg.python.org/cpython/file/603b4d593758/Lib/socket.py#l535>`_.
None will set an infinite timeout.
:type read: integer, float, or None
.. note::
Many factors can affect the total amount of time for urllib3 to return
an HTTP response.
For example, Python's DNS resolver does not obey the timeout specified
on the socket. Other factors that can affect total request time include
high CPU load, high swap, the program running at a low priority level,
or other behaviors.
In addition, the read and total timeouts only measure the time between
read operations on the socket connecting the client and the server,
not the total amount of time for the request to return a complete
response. For most requests, the timeout is raised because the server
has not sent the first byte in the specified time. This is not always
the case; if a server streams one byte every fifteen seconds, a timeout
of 20 seconds will not trigger, even though the request will take
several minutes to complete.
If your goal is to cut off any request after a set amount of wall clock
time, consider having a second "watcher" thread to cut off a slow
request.
"""
#: A sentinel object representing the default timeout value
DEFAULT_TIMEOUT = _GLOBAL_DEFAULT_TIMEOUT
def __init__(self, total=None, connect=_Default, read=_Default):
self._connect = self._validate_timeout(connect, 'connect')
self._read = self._validate_timeout(read, 'read')
self.total = self._validate_timeout(total, 'total')
self._start_connect = None
def __str__(self):
return '%s(connect=%r, read=%r, total=%r)' % (
type(self).__name__, self._connect, self._read, self.total)
@classmethod
def _validate_timeout(cls, value, name):
""" Check that a timeout attribute is valid.
:param value: The timeout value to validate
:param name: The name of the timeout attribute to validate. This is
used to specify in error messages.
:return: The validated and casted version of the given value.
:raises ValueError: If the type is not an integer or a float, or if it
is a numeric value less than zero.
"""
if value is _Default:
return cls.DEFAULT_TIMEOUT
if value is None or value is cls.DEFAULT_TIMEOUT:
return value
try:
float(value)
except (TypeError, ValueError):
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
try:
if value < 0:
raise ValueError("Attempted to set %s timeout to %s, but the "
"timeout cannot be set to a value less "
"than 0." % (name, value))
except TypeError: # Python 3
raise ValueError("Timeout value %s was %s, but it must be an "
"int or float." % (name, value))
return value
@classmethod
def from_float(cls, timeout):
""" Create a new Timeout from a legacy timeout value.
The timeout value used by httplib.py sets the same timeout on the
connect(), and recv() socket requests. This creates a :class:`Timeout`
object that sets the individual timeouts to the ``timeout`` value
passed to this function.
:param timeout: The legacy timeout value.
:type timeout: integer, float, sentinel default object, or None
:return: Timeout object
:rtype: :class:`Timeout`
"""
return Timeout(read=timeout, connect=timeout)
def clone(self):
""" Create a copy of the timeout object
Timeout properties are stored per-pool but each request needs a fresh
Timeout object to ensure each one has its own start/stop configured.
:return: a copy of the timeout object
:rtype: :class:`Timeout`
"""
# We can't use copy.deepcopy because that will also create a new object
# for _GLOBAL_DEFAULT_TIMEOUT, which socket.py uses as a sentinel to
# detect the user default.
return Timeout(connect=self._connect, read=self._read,
total=self.total)
def start_connect(self):
""" Start the timeout clock, used during a connect() attempt
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to start a timer that has been started already.
"""
if self._start_connect is not None:
raise TimeoutStateError("Timeout timer has already been started.")
self._start_connect = current_time()
return self._start_connect
def get_connect_duration(self):
""" Gets the time elapsed since the call to :meth:`start_connect`.
:return: Elapsed time.
:rtype: float
:raises urllib3.exceptions.TimeoutStateError: if you attempt
to get duration for a timer that hasn't been started.
"""
if self._start_connect is None:
raise TimeoutStateError("Can't get connect duration for timer "
"that has not started.")
return current_time() - self._start_connect
@property
def connect_timeout(self):
""" Get the value to use when setting a connection timeout.
This will be a positive float or integer, the value None
(never timeout), or the default system timeout.
:return: Connect timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
"""
if self.total is None:
return self._connect
if self._connect is None or self._connect is self.DEFAULT_TIMEOUT:
return self.total
return min(self._connect, self.total)
@property
def read_timeout(self):
""" Get the value for the read timeout.
This assumes some time has elapsed in the connection timeout and
computes the read timeout appropriately.
If self.total is set, the read timeout is dependent on the amount of
time taken by the connect timeout. If the connection time has not been
established, a :exc:`~urllib3.exceptions.TimeoutStateError` will be
raised.
:return: Value to use for the read timeout.
:rtype: int, float, :attr:`Timeout.DEFAULT_TIMEOUT` or None
:raises urllib3.exceptions.TimeoutStateError: If :meth:`start_connect`
has not yet been called on this object.
"""
if (self.total is not None and
self.total is not self.DEFAULT_TIMEOUT and
self._read is not None and
self._read is not self.DEFAULT_TIMEOUT):
# In case the connect timeout has not yet been established.
if self._start_connect is None:
return self._read
return max(0, min(self.total - self.get_connect_duration(),
self._read))
elif self.total is not None and self.total is not self.DEFAULT_TIMEOUT:
return max(0, self.total - self.get_connect_duration())
else:
return self._read
|
{
"content_hash": "8491e087258fe1aed20df098a1f21186",
"timestamp": "",
"source": "github",
"line_count": 242,
"max_line_length": 82,
"avg_line_length": 40.65289256198347,
"alnum_prop": 0.6195364911567391,
"repo_name": "pogaku9/aws-datalake-quickstart-looker-isv-integration",
"id": "a166e8da478f2fe1c3d2d986a4b77090547f8725",
"size": "9838",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "scripts/lambdas/writetoES/urllib3/util/timeout.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "167786"
},
{
"name": "JavaScript",
"bytes": "15831"
},
{
"name": "PHP",
"bytes": "7305911"
},
{
"name": "Python",
"bytes": "1759375"
},
{
"name": "Shell",
"bytes": "8532"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.