prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
#!/usr/bin/env python
"""
unit test for filters module
author: Michael Grupp
This file is part of evo (github.com/MichaelGrupp/evo).
evo is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
evo is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with evo. If not, see <http://www.gnu.org/licenses/>.
"""
import math
import unittest
import numpy as np
from evo.core import filters
from evo.core import lie_algebra as lie
# TODO: clean these up and use proper fixtures.
# Trajectories of identity-rotation poses translated along z, used to
# exercise the path-distance based pair filters below.
# (Garbled extraction tokens in POSES_3 reconstructed.)
POSES_1 = [
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 0.5])),
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 1]))
]
POSES_2 = [
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 0.5])),
    lie.se3(np.eye(3), np.array([0, 0, 0.99])),
    lie.se3(np.eye(3), np.array([0, 0, 1.0]))
]
# Path length converges towards 1.0 without ever reaching it exactly.
POSES_3 = [
    lie.se3(np.eye(3), np.array([0, 0, 0.0])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9])),
    lie.se3(np.eye(3), np.array([0, 0, 0.99])),
    lie.se3(np.eye(3), np.array([0, 0, 0.999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.99999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.999999])),
    lie.se3(np.eye(3), np.array([0, 0, 0.9999999]))
]
POSES_4 = [
    lie.se3(np.eye(3), np.array([0, 0, 0])),
    lie.se3(np.eye(3), np.array([0, 0, 1])),
    lie.se3(np.eye(3), np.array([0, 0, 1])),
    lie.se3(np.eye(3), np.array([0, 0, 1]))
]
class TestFilterPairsByPath(unittest.TestCase):
    """Tests for filters.filter_pairs_by_path in all-pairs mode."""

    def _run(self, poses, target_path, tol):
        # Thin wrapper so each test reads as a single expectation.
        return filters.filter_pairs_by_path(poses, target_path, tol,
                                            all_pairs=True)

    def test_poses1_all_pairs(self):
        self.assertEqual(self._run(POSES_1, 1.0, 0.0), [(0, 2), (2, 3)])

    def test_poses1_wrong_target(self):
        # No pair of poses is exactly 2.5 apart along the path.
        self.assertEqual(self._run(POSES_1, 2.5, 0.0), [])

    def test_poses2_all_pairs_low_tolerance(self):
        self.assertEqual(self._run(POSES_2, 1.0, 0.001), [(0, 3)])

    def test_convergence_all_pairs(self):
        self.assertEqual(self._run(POSES_3, 1.0, 0.2), [(0, 7)])
axis = np.array([1, 0, 0])
# POSES_5: pure rotations about the x axis (zero translation), used to
# exercise the angle-based pair filters.
POSES_5 = [
    lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * math.pi / 3), np.array([0, 0, 0])),
    lie.se3(lie.so3_exp(axis * math.pi), np.array([0, 0, 0]))
]
# The same poses after a random rigid global transformation; the filters
# are expected to be invariant to it.
TRANSFORM = lie.random_se3()
POSES_5_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_5]
axis = np.array([1, 0, 0])
# POSES_6: a chain built by repeatedly right-multiplying the same relative
# motion pd (pi/3 rotation about x plus a translation); last pose repeated.
p0 = lie.se3(lie.so3_exp(axis * 0.0), np.array([0, 0, 0]))
pd = lie.se3(lie.so3_exp(axis * (math.pi / 3.)), np.array([1, 2, 3]))
p1 = np.dot(p0, pd)
p2 = np.dot(p1, pd)
p3 = np.dot(p2, pd)
POSES_6 = [p0, p1, p2, p3, p3]
POSES_6_TRANSFORMED = [TRANSFORM.dot(p) for p in POSES_6]
class TestFilterPairsByAngle(unittest.TestCase):
    """Tests for filters.filter_pairs_by_angle (relative rotation angle)."""

    def _check_both_units(self, poses, target_angle, tol, all_pairs,
                          expected):
        # Run once in radians and once in degrees; both must agree.
        id_pairs = filters.filter_pairs_by_angle(poses, target_angle, tol,
                                                 all_pairs=all_pairs)
        self.assertEqual(id_pairs, expected)
        id_pairs = filters.filter_pairs_by_angle(poses,
                                                 np.rad2deg(target_angle),
                                                 tol, all_pairs=all_pairs,
                                                 degrees=True)
        self.assertEqual(id_pairs, expected)

    def test_poses5(self):
        tol = 0.001
        # Result should be unaffected by global transformation.
        for poses in (POSES_5, POSES_5_TRANSFORMED):
            self._check_both_units(poses, math.pi - tol, tol, False,
                                   [(0, 1), (1, 2), (2, 4)])

    def test_poses5_all_pairs(self):
        # Result should be unaffected by global transformation.
        for poses in (POSES_5, POSES_5_TRANSFORMED):
            self._check_both_units(poses, math.pi, 0.01, True,
                                   [(0, 1), (0, 4), (1, 2), (2, 4)])

    def test_poses6(self):
        tol = 0.001
        # Result should be unaffected by global transformation.
        for poses in (POSES_6, POSES_6_TRANSFORMED):
            id_pairs = filters.filter_pairs_by_angle(poses, math.pi - tol,
                                                     tol, all_pairs=False)
            self.assertEqual(id_pairs, [(0, 3)])

    def test_poses6_all_pairs(self):
        # Result should be unaffected by global transformation.
        for poses in (POSES_6, POSES_6_TRANSFORMED):
            id_pairs = filters.filter_pairs_by_angle(poses, math.pi, 0.001,
                                                     all_pairs=True)
            self.assertEqual(id_pairs, [(0, 3), (0, 4)])
if __name__ == '__main__':
unittest.main(verbosity=2)
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""create_table_container
Revision ID: 9fe371393a24
Revises: a9a92eebd9a8
Create Date: 2016-06-12 16:09:35.686539
"""
# revision identifiers, used by Alembic.
revision = '9fe371393a24'
down_revision = 'a9a92eebd9a8'
branch_labels = None
depends_on = None
fr | om alembic import op
import sqlalchemy as sa
import zun
def upgrade():
    """Create the initial ``container`` table (garbled token reconstructed)."""
    op.create_table(
        'container',
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.Column('updated_at', sa.DateTime(), nullable=True),
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('project_id', sa.String(length=255), nullable=True),
        sa.Column('user_id', sa.String(length=255), nullable=True),
        sa.Column('uuid', sa.String(length=36), nullable=True),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('image', sa.String(length=255), nullable=True),
        sa.Column('command', sa.String(length=255), nullable=True),
        sa.Column('status', sa.String(length=20), nullable=True),
        # Dict serialized to text by the project's custom JSON column type.
        sa.Column('environment', zun.db.sqlalchemy.models.JSONEncodedDict(),
                  nullable=True),
        sa.PrimaryKeyConstraint('id'),
        sa.UniqueConstraint('uuid', name='uniq_container0uuid')
    )
|
import datetime
from tracker.models import Member, Report
from django.template.defaultfilters import slugify
import csv
import urllib
import simplejson as json
from dateutil.parser import *
import time
def update_twitter(branch='house', official=True, batch=1):
    """Fetch Twitter follower/status counts for one chamber's members and
    store them in today's Report rows.

    :param branch: 'house' or 'senate', passed to Member.objects.filter.
    :param official: if True use official accounts, else campaign accounts.
    :param batch: which slice of 100 screen names to look up (the lookup
        API accepts at most 100 names); batch 5 takes index 400 onward.

    NOTE(review): Python 2 code (urllib.urlopen) against the retired
    Twitter v1 API -- kept as-is.
    """
    if official:
        screen_names = [x.official_twitter_name for x in Member.objects.filter(branch=branch, official_twitter_name__isnull=False).order_by('last_name')]
    else:
        screen_names = [x.campaign_twitter_name for x in Member.objects.filter(branch=branch, campaign_twitter_name__isnull=False).order_by('last_name')]
    # Slice into batches of 100 names for the lookup call.
    # NOTE(review): a batch value outside 1-5 silently keeps the full list,
    # which would exceed the API's 100-name limit -- confirm callers.
    if batch == 1:
        screen_names = screen_names[:100]
    elif batch == 2:
        screen_names = screen_names[100:200]
    elif batch == 3:
        screen_names = screen_names[200:300]
    elif batch == 4:
        screen_names = screen_names[300:400]
    elif batch == 5:
        screen_names = screen_names[400:]
    url = "http://api.twitter.com/1/users/lookup.json?screen_name=%s" % ",".join(screen_names)
    response = urllib.urlopen(url).read()
    results = json.loads(response)
    for result in results:
        # Match the API result back to the Member by (case-insensitive)
        # screen name and record today's counts.
        if official:
            member = Member.objects.get(official_twitter_name__iexact=result['screen_name'])
            report, created = Report.objects.get_or_create(member=member, date=datetime.date.today())
            report.official_twitter_followers=result['followers_count']
            report.official_twitter_updates=result['statuses_count']
            report.save()
        else:
            member = Member.objects.get(campaign_twitter_name__iexact=result['screen_name'])
            report, created = Report.objects.get_or_create(member=member, date=datetime.date.today())
            report.campaign_twitter_followers=result['followers_count']
            report.campaign_twitter_updates=result['statuses_count']
            report.save()
def update_facebook(members, token):
    """Fetch Facebook page 'likes' for each member's official and campaign
    pages (one Graph API batch request per member) and store them in
    today's Report row.

    :param members: iterable of Member objects.
    :param token: Facebook Graph API access token.
    """
    for member in members:
        print member
        report, created = Report.objects.get_or_create(member=member, date=datetime.date.today())
        params = {}
        params['access_token'] = token
        # Batch the two page lookups into one Graph API request; empty
        # page names are filtered out below.
        batch = [{'method': 'GET', 'relative_url': str(member.official_facebook_name)}, {'method': 'GET', 'relative_url': str(member.campaign_facebook_name)}]
        params['batch'] = [x for x in batch if x['relative_url'] != '']
        encoded_params = urllib.urlencode(params)
        f = urllib.urlopen("https://graph.facebook.com", encoded_params).read()
        results = json.loads(f)
        for result in results:
            try:
                body = json.loads(result['body'])
            except:
                # Unparseable body: skip this sub-response.
                continue
            if body == False:
                # Graph API returns false for inaccessible pages.
                continue
            else:
                # Match the response to a page first by username, then by
                # numeric page id when the username key is missing.
                try:
                    if str(member.official_facebook_name.lower()) == body['username'].lower():
                        report.official_facebook_likes= body['likes']
                    elif str(member.campaign_facebook_name.lower()) == body['username'].lower():
                        report.campaign_facebook_likes= body['likes']
                except:
                    try:
                        if member.official_facebook_name == body['id']:
                            report.official_facebook_likes= body['likes']
                        elif member.campaign_facebook_name == body['id']:
                            report.campaign_facebook_likes= body['likes']
                    except KeyError:
                        print "No match found for %s" % member
        report.save()
        # Throttle to stay under Facebook's rate limits.
        time.sleep(3)
def update_member(member):
    """Create today's Report for a single member.

    NOTE(review): this function references ``token``, ``official_twitter``
    and ``campaign_twitter``, none of which are defined in this module or
    passed in -- calling it as written raises NameError. It looks like
    dead/unfinished code; confirm intent before use.
    """
    official_likes = member.facebook_likes(member.official_facebook_name, token)
    campaign_likes = member.facebook_likes(member.campaign_facebook_name, token)
    report, created = Report.objects.get_or_create(member=member, date=datetime.date.today(), official_twitter_followers=official_twitter, official_facebook_likes=official_likes, campaign_facebook_likes=campaign_likes, campaign_twitter_followers=campaign_twitter)
    report.save()
def load_chamber(chamber):
    """Load members of one chamber from its CSV file into the Member table.

    :param chamber: 'house' or 'senate'; selects which CSV file is read.
    :raises ValueError: if chamber is neither 'house' nor 'senate'.
    """
    if chamber == 'senate':
        filename = "senate.csv"
    elif chamber == 'house':
        filename = "house.csv"
    else:
        # BUG FIX: the original did ``raise("Must be house or senate")``,
        # which raises a TypeError (strings are not exceptions).
        raise ValueError("Must be house or senate")
    # Use a context manager so the file handle is not leaked.
    with open(filename, "r") as f:
        rows = csv.DictReader(f, delimiter=',')
        for row in rows:
            member, created = Member.objects.get_or_create(last_name=row['last'], first_name=row['first'], slug=slugify(row['first']+' '+row['last']), party=row['party'], branch=chamber, state=row['state'], district=row['district'])
            # Official username wins; campaign name is only stored when no
            # official one is present (original behavior preserved).
            if row['username'] != '':
                member.official_facebook_name = row['username']
                member.save()
            elif row['username_campaign'] != '':
                member.campaign_facebook_name = row['username_campaign']
                member.save()
            if row['twitter'] != '':
                member.official_twitter_name = row['twitter']
                member.save()
def update_from_al():
    """Bulk-update Member demographic/social fields from the 9/14/11 CSV.

    Members are matched by bioguide id; empty CSV cells are mapped to
    None for the nullable fields.
    """
    # Context manager fixes the leaked file handle of the original.
    with open("congress_upload_9_14_11.csv", "r") as f:
        rows = csv.DictReader(f, delimiter=',')
        for row in rows:
            print(row['Name'])
            member, created = Member.objects.get_or_create(bioguide_id=row['bioguide'])
            member.date_of_birth = parse(str(row['dob'])).date()
            member.race = row['race']
            member.gender = row['gender']
            member.service = int(row['service'])
            # Only the first character encodes the status.
            member.status = row['status'][0]
            member.youtube_name = row['youtube_name']
            member.margin_2010 = float(row['margin_2010'])
            member.social_networks = int(row['social_networks'])
            if row['facebook_10'] == '':
                member.facebook_10 = None
            else:
                member.facebook_10 = int(row['facebook_10'])
            member.facebook_status = int(row['facebook_status'])
            if row['twitter_10'] == '':
                member.twitter_10 = None
            else:
                member.twitter_10 = int(row['twitter_10'])
            member.twitter_status = int(row['twitter_status'])
            if row['official_twitter_name'] == '':
                member.official_twitter_name = None
            else:
                member.official_twitter_name = row['official_twitter_name']
            if row['campaign_twitter_name'] == '':
                member.campaign_twitter_name = None
            else:
                member.campaign_twitter_name = row['campaign_twitter_name']
            # BUG FIX: csv.DictReader yields '' (never None) for empty
            # cells, so the original ``row['index_10'] == None`` test could
            # not trigger and int('') raised ValueError on blank cells.
            if row['index_10'] == '':
                member.index_10 = None
            else:
                member.index_10 = int(row['index_10'])
            member.save()
|
tions for each source in the child
catalog.
"""
self.haystack = self.cat[[self.lon_col,
self.lat_col]].values
    def _match(self):
        """
        Match each source in the child catalog to the BGPS.

        For every (lon, lat) coordinate in the haystack, sample the BGPS
        label map at that position and record the returned catalog number.
        """
        self.matched_ix = {}
        ids = self.cat.index
        haystack = self.haystack
        for cat_ix, coord in zip(ids, haystack):
            # cnum is the BGPS catalog number at this position; NaN/0
            # values are treated as "no match" by _enter_matched.
            cnum = sample_bgps_img(coord[0], coord[1], v=self.v)
            self._enter_matched(cat_ix, cnum)
    def _enter_matched(self, ix, cnum):
        """
        Insert the indices of the matched sources into the matched source
        dictionary with the BGPS cnum as the key.

        :param ix: index of the source in the child catalog
        :param cnum: BGPS catalog number (NaN or 0 means "no match")
        """
        self.cat.loc[ix, 'in_bgps'] = cnum
        # NaN -> off-map, 0 -> unassigned pixel; only real cnums are kept.
        if (not _np.isnan(cnum)) & (cnum != 0):
            self.matched_ix.setdefault(cnum, [])
            self.matched_ix[cnum].append(ix)
    def _drop_orig_cols(self):
        """
        Drop original columns to the BGPS leaving just the matched and the
        catalog number as the index.
        """
        # The first n_bgps_cols columns are the untouched BGPS ones.
        self.bgps_culled = self.bgps.drop(labels=self.bgps.columns[
            :self.n_bgps_cols], axis=1)
    def process(self):
        """
        Simple processing to add:
            number of matches
            number of detections
                If `det_col` is specified.
            all columns
                If `choose_col` and `noise_col` are specified then the
                maximum in `choose_col` will be used, or if all are null
                values, then source with minimum noise will be chosen.
                If `noise_col` is `None` then the first source in the
                sub-index will be used.
        columns to BGPS catalog.
        """
        # New column for number of matched sources
        # NOTE(review): uses dict.iteritems and the removed DataFrame.ix
        # indexer -- this code targets Python 2 with old pandas.
        for cnum, cat_indices in self.matched_ix.iteritems():
            self.bgps.ix[cnum, self.bgps_count_col] = len(cat_indices)
            if self.det_col is not None:
                # Count how many matched rows carry a detection flag.
                matches = self.cat.ix[cat_indices, self.det_col]
                num_dets = matches[matches.isin(self.det_flags)].shape[0]
                self.bgps.ix[cnum, self.bgps_det_col] = num_dets
            if self.choose_col is not None:
                # Prefer the row maximizing choose_col; fall back to the
                # lowest-noise row when choose_col is all-NaN.
                choose_ix = self.cat.ix[cat_indices, self.choose_col].idxmax()
                if _np.isnan(choose_ix) & (self.noise_col is not None):
                    choose_ix = self.cat.ix[cat_indices,
                                            self.noise_col].idxmin()
            else:
                choose_ix = cat_indices[0]
            self.bgps.ix[cnum, self.cat.columns] = self.cat.ix[choose_ix]
        # Prefix all newly appended columns with this catalog's name.
        self.bgps = self.bgps.rename(columns={col: self.name + '_' + col for
                                              col in self.bgps.columns[
                                                  self.n_bgps_cols:]})
        self._drop_orig_cols()
def to_csv(self):
"""
Write BGPS catalog to `.csv` file.
"""
self.cat.to_csv('cat_' + self.name + '.csv')
self.bgps.to_csv('bgps_' + self.name + '.csv')
class DataSet(object):
    """Match every child catalog against the BGPS, then merge, flag and
    write the combined catalog. Instantiating this class runs the whole
    pipeline.
    """
    def __init__(self):
        self.all_data = []
        # One Data subclass per external catalog to match.
        all_objs = [
            WaterGbt,
            WaterArcetri,
            WaterHops,
            WaterRms,
            Cornish,
            Egos,
            AmmoniaGbt,
            MethoPandian,
            MethoPestalozzi,
            MethoMmb,
            Higal70,
            RedSpitzer,
            RedMsx,
            Molcat,
            WienenNh3,
            MipsgalCatalog,
            MipsgalArchive,
        ]
        for obj in all_objs:
            data = obj()
            data.match()
            data.write()
            self.all_data.append(data)
        self.process()
    def _merge(self):
        print '-- Merging data'
        # Start from the base BGPS v2.1.0 catalog keyed by catalog number.
        merged_data = catalog.read_cat('bgps_v210').set_index('v210cnum')
        for data in self.all_data:
            merge_cat = data.matcher.bgps_culled
            merged_data = merged_data.merge(merge_cat,
                                            left_index=True,
                                            right_index=True)
        self.merged_data = merged_data
    def _append_evo_flags(self):
        print '-- Adding evolutionary flags'
        self.merged_data = append_evo_flags(bgps=self.merged_data)
    def _write(self):
        print '-- Writing all merged data'
        # NOTE(review): index=False discards the v210cnum index -- confirm
        # downstream readers do not need it.
        self.merged_data.to_csv('bgps_v210_all_full.csv', index=False)
    def process(self):
        """Merge all matched catalogs, add evolutionary flags, write CSV."""
        self._merge()
        self._append_evo_flags()
        self._write()
class Data(object):
    """
    Parent class for object-catalogs to be matched with `Matcher`.

    Subclasses define the catalog attributes (name, cat, lon_col, lat_col,
    det_col, det_flags, choose_col, noise_col) that Matcher reads.
    """
    def match(self):
        """Run the positional match of this catalog against the BGPS."""
        print '-- Matching {0}'.format(self.name)
        self.matcher = Matcher(self)
        self.matcher.process()
    def write(self):
        """Write the matched catalogs to CSV via the matcher."""
        self.matcher.to_csv()
def append_evo_flags(bgps):
    """
    Calculate and append evolutionary flags to BGPS catalog.

    Parameters
    ----------
    bgps : pandas.DataFrame
        BGPS catalog with the per-survey match columns (``h2o_gbt_n``,
        ``ch3oh_mmb_n``, ``ego_n``, ``corn_n``, ...) already merged in.

    Returns
    -------
    bgps : pandas.DataFrame
        The same frame with ``h2o_f``, ``ch3oh_f``, ``ego_f``, ``ir_f``,
        ``uchii_f`` and ``sf_f`` appended (NaN where undetermined).
    """
    evo_flags = ['h2o_f', 'ch3oh_f', 'ego_f', 'ir_f', 'uchii_f', 'sf_f']
    for col in evo_flags:
        bgps[col] = _np.nan
    # H2O flags. FIX: use .loc instead of chained ``bgps[col][mask] = v``
    # indexing, which can silently assign to a copy in newer pandas.
    bgps.loc[((bgps['h2o_gbt_n'] > 0) & (bgps['h2o_gbt_f'] == 0)) &
             _np.logical_not(bgps['h2o_arc_f'] > 0) &
             _np.logical_not(bgps['h2o_rms_n'] > 0) &
             _np.logical_not(bgps['h2o_hops_n'] > 0), 'h2o_f'] = 0
    bgps.loc[(bgps['h2o_gbt_f'] > 0) |
             (bgps['h2o_rms_n'] > 0) |
             (bgps['h2o_arc_n'] > 0) |
             (bgps['h2o_hops_n'] > 0), 'h2o_f'] = 1
    # CH3OH flags
    bgps.loc[(bgps['ch3oh_pesta_n'] > 0) |
             (bgps['ch3oh_pandi_n'] > 0) |
             (bgps['ch3oh_mmb_n'] > 0), 'ch3oh_f'] = 1
    # EGO flags
    bgps.loc[bgps['ego_n'] > 0, 'ego_f'] = 1
    # IR flags
    bgps.loc[(bgps['robit_f'] > 0) |
             (bgps['red_msx_f'] > 0) |
             (bgps['ego_n'] > 0), 'ir_f'] = 1  # IR YSO
    bgps.loc[(bgps['ir_f'] != 1) &
             (bgps['robit_n'] > 0) &
             (bgps['robit_f'] == 0), 'ir_f'] = 2  # robitaille AGB
    # UCHII flags
    bgps.loc[bgps['corn_n'] > 0, 'uchii_f'] = 1
    # Starless: quiescent in every tracer; active star formation otherwise
    bgps.loc[(bgps['h2o_f'] == 0) &
             (bgps['ch3oh_f'] != 1) &
             (bgps['ir_f'] != 1) &
             (bgps['uchii_f'] != 1), 'sf_f'] = 0
    bgps.loc[(bgps['h2o_f'] == 1) |
             (bgps['ch3oh_f'] == 1) |
             (bgps['ir_f'] == 1) |
             (bgps['uchii_f'] == 1), 'sf_f'] = 1
    return bgps
###############################################################################
# Catalog Data Objects
###############################################################################
class WaterGbt(Data):
    """GBT H2O maser catalog, positionally matched against the BGPS."""
    def __init__(self):
        # Catalog parameters
        self.name = 'h2o_gbt'
        self.cat = catalog.read_cat('gbt_h2o')
        # Galactic coordinate columns used for the positional match.
        self.lon_col = 'h2o_glon'
        self.lat_col = 'h2o_glat'
        # Detection flag column and the values counting as detections.
        self.det_col = 'h2o_f'
        self.det_flags = [1]
        # Prefer the brightest peak; break all-NaN ties by lowest noise.
        self.choose_col = 'h2o_tpk'
        self.noise_col = 'h2o_tpk_err'
class WaterArcetri(Data):
    """Arcetri H2O maser catalog (Valdettaro et al. 2001)."""
    def __init__(self):
        # Catalog parameters
        self.name = 'h2o_arc'
        self.cat = catalog.read_cat('valdettaro01_arcetri')
        # Galactic coordinate columns used for the positional match.
        self.lon_col = '_Glon'
        self.lat_col = '_Glat'
        # Detection flag column and the values counting as detections.
        self.det_col = 'h2o_f'
        self.det_flags = [1]
        # Prefer the largest integrated flux; tie-break by lowest noise.
        self.choose_col = 'Stot'
        self.noise_col = 'Sig'
class WaterHops(Data):
    """HOPS H2O maser catalog (Walsh et al. 2014)."""
    def __init__(self):
        # Catalog parameters
        self.name = 'h2o_hops'
        self.cat = catalog.read_cat('walsh14_hops_h2o')
        # Galactic coordinate columns used for the positional match.
        self.lon_col = '_Glon'
        self.lat_col = '_Glat'
        # No detection flag column: every catalog row is a detection.
        self.det_col = None
        self.det_flags = None
        # Prefer the largest peak flux; no noise column, so the first
        # source in the sub-index is used when Sp is all-NaN.
        self.choose_col = 'Sp'
        self.noise_col = None
class WaterRms(Data):
def __init__(self):
# Catalog parameters
self.name = 'h2o_rms'
self.cat = catalog.read_cat('urquhart11_red_msx_h2o')
self.lon_col = '_Glon_1_'
self.lat_col = '_Glat_1_'
self.det_col = 'H2O_1_'
self.det_flags = ['y']
self.choose_col = |
baseInstance(BaseResource):
"""
This class represents a MySQL instance in the cloud.
"""
    def __init__(self, *args, **kwargs):
        """Wire up per-instance database and user managers.

        Each manager talks to a URI rooted at this instance's id and gets a
        back-reference to the instance; the record is then fully loaded.
        """
        super(CloudDatabaseInstance, self).__init__(*args, **kwargs)
        self._database_manager = CloudDatabaseDatabaseManager(self.manager.api,
                resource_class=CloudDatabaseDatabase, response_key="database",
                uri_base="instances/%s/databases" % self.id)
        self._user_manager = CloudDatabaseUserManager(self.manager.api,
                resource_class=CloudDatabaseUser, response_key="user",
                uri_base="instances/%s/users" % self.id)
        # Add references to the parent instance to the managers.
        self._database_manager.instance = self._user_manager.instance = self
        # Remove the lazy load
        if not self.loaded:
            self.get()
    def get(self):
        """
        Need to override the default get() behavior by making the 'volume'
        attribute into a CloudDatabaseVolume object instead of the raw dict
        returned by the API.
        """
        super(CloudDatabaseInstance, self).get()
        # Make the volume into an accessible object instead of a dict
        self.volume = CloudDatabaseVolume(self, self.volume)
    def list_databases(self, limit=None, marker=None):
        """Returns a list of all databases for this instance.

        :param limit: maximum number of results to return.
        :param marker: pagination marker; results start after this item.
        """
        return self._database_manager.list(limit=limit, marker=marker)
    def list_users(self, limit=None, marker=None):
        """Returns a list of all users for this instance.

        :param limit: maximum number of results to return.
        :param marker: pagination marker; results start after this item.
        """
        return self._user_manager.list(limit=limit, marker=marker)
    def get_user(self, name):
        """
        Finds the user in this instance with the specified name, and
        returns a CloudDatabaseUser object. If no match is found, a
        NoSuchDatabaseUser exception is raised.

        :param name: name of the user to look up.
        """
        try:
            return self._user_manager.get(name)
        except exc.NotFound:
            # Re-raise as a more specific, caller-friendly exception.
            raise exc.NoSuchDatabaseUser("No user by the name '%s' exists." %
                    name)
def get_database(self, name):
"""
Finds the database in this instance with the specified name, and
returns a CloudDatabaseDatabase object. If no match is found, a
NoSuchDatabase exception is raised.
"""
try:
return [db for db in self.list_databases()
if db.name == name][0]
except IndexError:
raise exc.NoSuchDatabase("No database by the name '%s' exists." %
name)
    def create_database(self, name, character_set=None, collate=None):
        """
        Creates a database with the specified name. If a database with
        that name already exists, a BadRequest (400) exception will
        be raised.

        :param character_set: defaults to "utf8" when None.
        :param collate: defaults to "utf8_general_ci" when None.
        :returns: the created database, looked up by name.
        """
        if character_set is None:
            character_set = "utf8"
        if collate is None:
            collate = "utf8_general_ci"
        self._database_manager.create(name=name, character_set=character_set,
                collate=collate, return_none=True)
        # Since the API doesn't return the info for creating the database
        # object, we have to do it manually.
        return self._database_manager.find(name=name)
    def create_user(self, name, password, database_names, host=None):
        """
        Creates a user with the specified name and password, and gives that
        user access to the specified database(s).

        If a user with that name already exists, a BadRequest (400) exception
        will be raised.

        :param database_names: a single database (name or object) or a
            list/tuple of them.
        :returns: the created user, looked up by name.
        """
        if not isinstance(database_names, (list, tuple)):
            database_names = [database_names]
        # The API only accepts names, not DB objects
        database_names = [db if isinstance(db, six.string_types) else db.name
                for db in database_names]
        self._user_manager.create(name=name, password=password,
                database_names=database_names, host=host, return_none=True)
        # Since the API doesn't return the info for creating the user object,
        # we have to do it manually.
        return self._user_manager.find(name=name)
    def delete_database(self, name_or_obj):
        """
        Deletes the specified database. If no database by that name
        exists, no exception will be raised; instead, nothing at all
        is done.

        :param name_or_obj: database name or a database object.
        """
        name = utils.get_name(name_or_obj)
        self._database_manager.delete(name)
    def change_user_password(self, user, new_pass):
        """
        Changes the password for the user to the supplied value.

        Returns None upon success; raises PasswordChangeFailed if the call
        does not complete successfully.

        :param user: the user (name or object) whose password is changed.
        :param new_pass: the new password value.
        """
        return self._user_manager.change_user_password(user, new_pass)
    def update_user(self, user, name=None, password=None, host=None):
        """
        Allows you to change one or more of the user's username, password, or
        host. Parameters left as None are not modified.
        """
        return self._user_manager.update(user, name=name, password=password,
                host=host)
    def list_user_access(self, user):
        """
        Returns a list of all database names for which the specified user
        has access rights.

        :param user: the user (name or object) to query.
        """
        return self._user_manager.list_user_access(user)
    def grant_user_access(self, user, db_names, strict=True):
        """
        Gives access to the databases listed in `db_names` to the user.

        # NOTE(review): ``strict`` is forwarded verbatim to the user
        # manager; its exact semantics live there -- confirm before relying.
        """
        return self._user_manager.grant_user_access(user, db_names,
                strict=strict)
    def revoke_user_access(self, user, db_names, strict=True):
        """
        Revokes access to the databases listed in `db_names` for the user.

        # NOTE(review): ``strict`` is forwarded verbatim to the user
        # manager; its exact semantics live there -- confirm before relying.
        """
        return self._user_manager.revoke_user_access(user, db_names,
                strict=strict)
    def delete_user(self, user):
        """
        Deletes the specified user. If no user by that name
        exists, no exception will be raised; instead, nothing at all
        is done.

        :param user: user name or user object.
        """
        name = utils.get_name(user)
        self._user_manager.delete(name)
    def enable_root_user(self):
        """
        Enables login from any host for the root user and provides
        the user with a generated root password.

        :returns: the generated root password string.
        """
        uri = "/instances/%s/root" % self.id
        resp, body = self.manager.api.method_post(uri)
        return body["user"]["password"]
    def root_user_status(self):
        """
        Returns True or False, depending on whether the root user
        for this instance has been enabled.
        """
        uri = "/instances/%s/root" % self.id
        resp, body = self.manager.api.method_get(uri)
        # The API reports the flag under the "rootEnabled" key.
        return body["rootEnabled"]
    def restart(self):
        """Restarts this instance via the manager's action endpoint."""
        self.manager.action(self, "restart")
    def resize(self, flavor):
        """Set the size of this instance to a different flavor.

        :param flavor: flavor name, size, or object; resolved to a ref.
        """
        # We need the flavorRef, not the flavor or size.
        flavorRef = self.manager.api._get_flavor_ref(flavor)
        body = {"flavorRef": flavorRef}
        self.manager.action(self, "resize", body=body)
    def resize_volume(self, size):
        """Changes the size of the volume for this instance.

        :param size: new volume size; must be strictly larger than the
            current size.
        :raises InvalidVolumeResize: when size is not an increase.
        """
        curr_size = self.volume.size
        if size <= curr_size:
            raise exc.InvalidVolumeResize("The new volume size must be larger "
                    "than the current volume size of '%s'." % curr_size)
        body = {"volume": {"size": size}}
        self.manager.action(self, "resize", body=body)
    def list_backups(self):
        """
        Returns a list of all backups for this instance.
        """
        return self.manager._list_backups_for_instance(self)
    def create_backup(self, name, description=None):
        """
        Creates a backup of this instance, giving it the specified name along
        with an optional description.

        :param name: name for the new backup.
        :param description: optional free-form description.
        """
        return self.manager.create_backup(self, name, description=description)
def _get_flavor(self):
try:
ret = self._flavor
except AttributeError:
ret = self._flavor = CloudDatabaseFlavor(
|
if to_add:
self.add_tags(to_add, auth=auth, save=save, log=log, system=system)
if to_remove:
self.remove_tags(to_remove, auth=auth, save=save)
    def add_tags(self, tags, auth=None, save=True, log=True, system=False):
        """
        Optimization method for use with update_tags. Unlike add_tag, already
        assumes the tags are not on the object (no duplicate check is done).

        :param tags: iterable of tag names to attach.
        :param auth: required unless ``system`` is True.
        :param save: persist the object once after all tags are added.
        :param log: record a log entry per tag via add_tag_log.
        :param system: create/look up system tags; skips the auth check.
        :raises ValueError: when a non-system tag is added without auth.
        """
        if not system and not auth:
            raise ValueError('Must provide auth if adding a non-system tag')
        for tag in tags:
            tag_instance, created = Tag.all_tags.get_or_create(name=tag, system=system)
            self.tags.add(tag_instance)
            # TODO: Logging belongs in on_tag_added hook
            if log:
                self.add_tag_log(tag_instance, auth)
            self.on_tag_added(tag_instance)
        if save:
            self.save()
    def add_tag(self, tag, auth=None, save=True, log=True, system=False):
        """Attach a single tag (name or Tag object) to this object.

        No-op (beyond the lookup) when the tag is already attached.

        :param tag: tag name string or Tag instance.
        :param auth: required unless ``system`` is True.
        :param save: persist the object after attaching.
        :param log: record a log entry via add_tag_log.
        :param system: create/look up a system tag; skips the auth check.
        :returns: the Tag instance that was attached (or already present).
        :raises ValueError: when a non-system tag is added without auth.
        """
        if not system and not auth:
            raise ValueError('Must provide auth if adding a non-system tag')
        if not isinstance(tag, Tag):
            tag_instance, created = Tag.all_tags.get_or_create(name=tag, system=system)
        else:
            tag_instance = tag
        if not self.tags.filter(id=tag_instance.id).exists():
            self.tags.add(tag_instance)
            # TODO: Logging belongs in on_tag_added hook
            if log:
                self.add_tag_log(tag_instance, auth)
            if save:
                self.save()
            self.on_tag_added(tag_instance)
        return tag_instance
    def remove_tag(self, *args, **kwargs):
        # Subclasses must override with their own removal logic.
        raise NotImplementedError('Removing tags requires that remove_tag is implemented')
    def add_system_tag(self, tag, save=True):
        """Attach a system tag (name or system Tag object), without auth.

        :raises ValueError: if a non-system Tag instance is passed.
        """
        if isinstance(tag, Tag) and not tag.system:
            raise ValueError('Non-system tag passed to add_system_tag')
        return self.add_tag(tag=tag, auth=None, save=save, log=False, system=True)
    def add_tag_log(self, *args, **kwargs):
        # Subclasses that enable logging must override this hook.
        raise NotImplementedError('Logging requires that add_tag_log method is implemented')
    def on_tag_added(self, tag):
        """Hook called after a tag is attached; default is a no-op."""
        pass
    class Meta:
        # Mixin only; never creates its own database table.
        abstract = True
class AddonModelMixin(models.Model):
    """Mixin providing add-on management for owner models (users/nodes).

    Subclasses set ``settings_type``, which is used to resolve the
    per-addon settings model and mandatory-addon configuration.
    (Garbled extraction tokens at two points reconstructed; behavior
    unchanged.)
    """
    # from addons.base.apps import BaseAddonConfig
    settings_type = None
    # All installed addon app configs, excluding the 'base' scaffold app.
    ADDONS_AVAILABLE = sorted([config for config in apps.get_app_configs() if config.name.startswith('addons.') and
                               config.label != 'base'])

    class Meta:
        abstract = True

    @classmethod
    def get_addon_key(cls, config):
        # Unique power-of-two key per addon, usable as a bit flag.
        return 2 << cls.ADDONS_AVAILABLE.index(config)

    @property
    def addons(self):
        return self.get_addons()

    def get_addons(self):
        # Python 2 ``filter`` returns a list; absent addons are dropped.
        return filter(None, [
            self.get_addon(config.short_name)
            for config in self.ADDONS_AVAILABLE
        ])

    def get_oauth_addons(self):
        # TODO: Using hasattr is a dirty hack - we should be using issubclass().
        #       We can't, because importing the parent classes here causes a
        #       circular import error.
        return [
            addon for addon in self.get_addons()
            if hasattr(addon, 'oauth_provider')
        ]

    def has_addon(self, addon_name, deleted=False):
        return bool(self.get_addon(addon_name, deleted=deleted))

    def get_addon_names(self):
        return [each.short_name for each in self.get_addons()]

    def get_or_add_addon(self, name, *args, **kwargs):
        """Return the named addon, adding it first if not present."""
        addon = self.get_addon(name)
        if addon:
            return addon
        return self.add_addon(name, *args, **kwargs)

    def get_addon(self, name, deleted=False):
        """Return this owner's settings object for the named addon.

        :param deleted: when True, also return soft-deleted settings.
        :returns: the settings object, or None when absent/not installed.
        """
        try:
            settings_model = self._settings_model(name)
        except LookupError:
            return None
        if not settings_model:
            return None
        try:
            settings_obj = settings_model.objects.get(owner=self)
            if not settings_obj.deleted or deleted:
                return settings_obj
        except ObjectDoesNotExist:
            pass
        return None

    def add_addon(self, addon_name, auth=None, override=False, _force=False):
        """Add an add-on to the node.

        :param str addon_name: Name of add-on
        :param Auth auth: Consolidated authorization object
        :param bool override: For shell use only, Allows adding of system addons
        :param bool _force: For migration testing ONLY. Do not set to True
            in the application, or else projects will be allowed to have
            duplicate addons!
        :return bool: Add-on was added
        """
        if not override and addon_name in settings.SYSTEM_ADDED_ADDONS[self.settings_type]:
            return False
        # Reactivate deleted add-on if present
        addon = self.get_addon(addon_name, deleted=True)
        if addon:
            if addon.deleted:
                addon.undelete(save=True)
                return addon
            if not _force:
                return False
        config = apps.get_app_config('addons_{}'.format(addon_name))
        model = self._settings_model(addon_name, config=config)
        ret = model(owner=self)
        ret.on_add()
        ret.save()  # TODO This doesn't feel right
        return ret

    def config_addons(self, config, auth=None, save=True):
        """Enable or disable a set of add-ons.

        :param dict config: Mapping between add-on names and enabled / disabled
            statuses
        """
        for addon_name, enabled in config.iteritems():
            if enabled:
                self.add_addon(addon_name, auth)
            else:
                self.delete_addon(addon_name, auth)
        if save:
            self.save()

    def delete_addon(self, addon_name, auth=None, _force=False):
        """Delete an add-on from the node.

        :param str addon_name: Name of add-on
        :param Auth auth: Consolidated authorization object
        :param bool _force: For migration testing ONLY. Do not set to True
            in the application, or else projects will be allowed to delete
            mandatory add-ons!
        :return bool: Add-on was deleted
        """
        addon = self.get_addon(addon_name)
        if not addon:
            return False
        if self.settings_type in addon.config.added_mandatory and not _force:
            raise ValueError('Cannot delete mandatory add-on.')
        if getattr(addon, 'external_account', None):
            addon.deauthorize(auth=auth)
        addon.delete(save=True)
        return True

    def _settings_model(self, addon_model, config=None):
        # Resolve '<settings_type>_settings' on the addon's app config.
        if not config:
            config = apps.get_app_config('addons_{}'.format(addon_model))
        return getattr(config, '{}_settings'.format(self.settings_type))
class NodeLinkMixin(models.Model):
class Meta:
abstract = True
def add_node_link(self, node, auth, save=True):
"""Add a node link to a node.
:param Node node: Node to add
:param Auth auth: Consolidated authorization
:param bool save: Save changes
:return: Created pointer
"""
# Fail if node already in nodes / pointers. Note: cast node and node
# to primary keys to test for conflicts with both nodes and pointers
# contained in `self.nodes`.
if NodeRelation.objects.filter(parent=self, child=node, is_node_link=True).exists():
raise ValueError(
'Link to node {0} already exists'.format(node._id)
)
if self.is_registration:
raise NodeStateError('Cannot add a node link to a registration')
# Append node link
node_relation, created = NodeRelation.objects.get_or_create(
parent=self,
child=node,
is_node_link=True
)
# Add log
if hasattr(self, 'add_log'):
self.add_log(
action=NodeLog.NODE_LINK_CREATED,
params={
'parent_node': self.parent_id,
'node': self._id,
'pointer': {
'id': node._id,
'url': node.url,
|
TIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Requires Python 2.6+ and Openssl 1.0+
#
import socket
import glob
import mock
import traceback
import azurelinuxagent.common.osutil.default as osutil
import azurelinuxagent.common.utils.shellutil as shellutil
import azurelinuxagent.common.utils.textutil as textutil
from azurelinuxagent.common.exception import OSUtilError
from azurelinuxagent.common.future import ustr
from azurelinuxagent.common.osutil import get_osutil
from tests.tools import *
actual_get_proc_net_route = 'azurelinuxagent.common.osutil.default.DefaultOSUtil._get_proc_net_route'
def fake_is_loopback(_, iface):
    """Stand-in for the OS util loopback check: an interface counts as
    loopback iff its name begins with 'lo' (e.g. 'lo', 'lo0')."""
    return iface[:2] == 'lo'
def running_under_travis():
    """Return True when the test run is executing on Travis CI.

    Travis sets the environment variable ``TRAVIS`` to the string 'true'.
    Uses ``os.environ.get`` instead of a membership test plus lookup.
    """
    return os.environ.get('TRAVIS') == 'true'
class TestOSUtil(AgentTestCase):
    def test_restart(self):
        """With the shell runner mocked to return 1 (failure), restart_if is
        expected to retry once per allowed attempt, issuing ifdown/ifup."""
        # setup: every shell invocation reports a non-zero exit status
        retries = 3
        ifname = 'dummy'
        with patch.object(shellutil, "run") as run_patch:
            run_patch.return_value = 1
            # execute
            osutil.DefaultOSUtil.restart_if(osutil.DefaultOSUtil(), ifname=ifname, retries=retries, wait=0)
            # assert: one shell call per retry, each restarting the interface
            self.assertEqual(run_patch.call_count, retries)
            self.assertEqual(run_patch.call_args_list[0][0][0], 'ifdown {0} && ifup {0}'.format(ifname))
    def test_get_dvd_device_success(self):
        """get_dvd_device should succeed when /dev listing contains a
        cdrom-like entry ('cdrom0'); no exception expected."""
        with patch.object(os, 'listdir', return_value=['cpu', 'cdrom0']):
            osutil.DefaultOSUtil().get_dvd_device()
def test_get_dvd_device_failure(self | ):
with patch.object(os, 'listdir', return_value=['cpu', 'notmatching']):
try:
osutil.DefaultOSUtil().get_dvd_device()
self.fail('OSUtilError was not raised')
except OSUtilError as ose:
self.assertTrue('notmatching' in ustr(ose))
    @patch('time.sleep')
    def test_mount_dvd_success(self, _):
        """mount_dvd should not raise when the mount command succeeds
        (run_get_output mocked to exit code 0)."""
        msg = 'message'
        with patch.object(osutil.DefaultOSUtil,
                          'get_dvd_device',
                          return_value='/dev/cdrom'):
            with patch.object(shellutil,
                              'run_get_output',
                              return_value=(0, msg)) as patch_run:
                with patch.object(os, 'makedirs'):
                    try:
                        osutil.DefaultOSUtil().mount_dvd()
                    except OSUtilError:
                        self.fail("mounting failed")
@patch('time.sleep')
def test_mount_dvd_failure(self, _):
msg = 'message'
with patch.object(osutil.DefaultOSUtil,
'get_dvd_device',
return_value='/dev/cdrom'):
with patch.object(shellutil,
'run_get_output',
return_value=(1, msg)) as patch_run:
with patch.object(os, 'makedirs'):
try:
osutil.DefaultOSUtil().mount_dvd()
self.fail('OSUtilError was not raised')
except OSUtilError as ose:
self.assertTrue(msg in ustr(ose))
self.assertTrue(patch_run.call_count == 6)
def test_empty_proc_net_route(self):
routing_table = ""
mo = mock.mock_open(read_data=routing_table)
with patch(open_patch(), mo):
self.assertEqual(len(osutil.DefaultOSUtil().read_route_table()), 0)
    def test_no_routes(self):
        """A routing table with only the header line parses to zero routes."""
        routing_table = 'Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n'
        mo = mock.mock_open(read_data=routing_table)
        with patch(open_patch(), mo):
            raw_route_list = osutil.DefaultOSUtil().read_route_table()
            self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0)
    def test_bogus_proc_net_route(self):
        """A malformed routing table row (missing columns) must be rejected,
        producing an empty parsed route list rather than an error."""
        routing_table = 'Iface\tDestination\tGateway \tFlags\t\tUse\tMetric\t\neth0\t00000000\t00000000\t0001\t\t0\t0\n'
        mo = mock.mock_open(read_data=routing_table)
        with patch(open_patch(), mo):
            raw_route_list = osutil.DefaultOSUtil().read_route_table()
            self.assertEqual(len(osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)), 0)
    def test_valid_routes(self):
        """A well-formed routing table parses into Route objects with the
        expected gateway/mask/destination quads, flags, metrics and
        interfaces; the raw lines must also hash to a known SHA-1."""
        # Header + 5 data rows; addresses are little-endian hex as in
        # /proc/net/route.
        routing_table = \
            'Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT \n' \
            'eth0\t00000000\tC1BB910A\t0003\t0\t0\t0\t00000000\t0\t0\t0 \n' \
            'eth0\tC0BB910A\t00000000\t0001\t0\t0\t0\tC0FFFFFF\t0\t0\t0 \n' \
            'eth0\t10813FA8\tC1BB910A\t000F\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \
            'eth0\tFEA9FEA9\tC1BB910A\t0007\t0\t0\t0\tFFFFFFFF\t0\t0\t0 \n' \
            'docker0\t002BA8C0\t00000000\t0001\t0\t0\t10\t00FFFFFF\t0\t0\t0 \n'
        # Pre-computed SHA-1 of the raw lines (guards against parser drift).
        known_sha1_hash = b'\x1e\xd1k\xae[\xf8\x9b\x1a\x13\xd0\xbbT\xa4\xe3Y\xa3\xdd\x0b\xbd\xa9'
        mo = mock.mock_open(read_data=routing_table)
        with patch(open_patch(), mo):
            raw_route_list = osutil.DefaultOSUtil().read_route_table()
            self.assertEqual(len(raw_route_list), 6)
            self.assertEqual(textutil.hash_strings(raw_route_list), known_sha1_hash)
            route_list = osutil.DefaultOSUtil().get_list_of_routes(raw_route_list)
            # Header row is dropped: 5 routes remain.
            self.assertEqual(len(route_list), 5)
            self.assertEqual(route_list[0].gateway_quad(), '10.145.187.193')
            self.assertEqual(route_list[1].gateway_quad(), '0.0.0.0')
            self.assertEqual(route_list[1].mask_quad(), '255.255.255.192')
            self.assertEqual(route_list[2].destination_quad(), '168.63.129.16')
            self.assertEqual(route_list[1].flags, 1)
            self.assertEqual(route_list[2].flags, 15)
            self.assertEqual(route_list[3].flags, 7)
            self.assertEqual(route_list[3].metric, 0)
            self.assertEqual(route_list[4].metric, 10)
            self.assertEqual(route_list[0].interface, 'eth0')
            self.assertEqual(route_list[4].interface, 'docker0')
    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='eth0')
    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0':'10.0.0.1'})
    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback)
    def test_get_first_if(self, get_all_interfaces_mock, get_primary_interface_mock):
        """
        Validate that the agent can find the first active non-loopback
        interface.
        This test case used to run live, but not all developers have an eth*
        interface. It is perfectly valid to have a br*, but this test does not
        account for that.
        """
        ifname, ipaddr = osutil.DefaultOSUtil().get_first_if()
        self.assertEqual(ifname, 'eth0')
        self.assertEqual(ipaddr, '10.0.0.1')
    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.get_primary_interface', return_value='bogus0')
    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil._get_all_interfaces', return_value={'eth0':'10.0.0.1', 'lo': '127.0.0.1'})
    @patch('azurelinuxagent.common.osutil.default.DefaultOSUtil.is_loopback', fake_is_loopback)
    def test_get_first_if_nosuchprimary(self, get_all_interfaces_mock, get_primary_interface_mock):
        """When the reported primary interface does not exist, get_first_if
        should fall back to a non-loopback interface with a valid IP."""
        ifname, ipaddr = osutil.DefaultOSUtil().get_first_if()
        self.assertTrue(ifname.startswith('eth'))
        self.assertTrue(ipaddr is not None)
        try:
            # inet_aton raises socket.error for malformed dotted quads.
            socket.inet_aton(ipaddr)
        except socket.error:
            self.fail("not a valid ip address")
def test_get_first_if_all_loopback(self):
fake_ifaces = {'lo':'127.0.0.1'}
with patch.object(osutil.DefaultOSUtil, 'get_primary_interface', return_value='bogus0'):
with patch.object(osutil.DefaultOSUtil, '_get_all_interfaces', return_value=fake_ifaces):
self.assertEqual(('', ''), osutil.DefaultOSUtil().get_first_if())
def test_get_all_interfaces(self):
loopback_count = 0
non_loopback_coun |
# Patchwork - automated patch tracking system
# Copyright ( | C) 2016 Linaro Corporation
#
# SPDX-License-Identifier: GPL-2.0-or-later
from rest_framework.response import Response
from rest_framework.reverse import reverse
from rest_framework.views import APIView
class IndexView(APIView):
    """Root of the REST API: maps each resource name to its list endpoint."""

    def get(self, request, *args, **kwargs):
        """List API resources."""
        endpoints = (
            ('projects', 'api-project-list'),
            ('users', 'api-user-list'),
            ('people', 'api-person-list'),
            ('patches', 'api-patch-list'),
            ('covers', 'api-cover-list'),
            ('series', 'api-series-list'),
            ('events', 'api-event-list'),
            ('bundles', 'api-bundle-list'),
        )
        return Response({name: reverse(view_name, request=request)
                         for name, view_name in endpoints})
|
import unittest
from unittest.mock import Mock
class Mailer:
    """Email gateway used by registerUser; delivery is not implemented here."""

    def send_email(self, email, message):
        """Deliver *message* to *email*. Always raises until implemented."""
        raise NotImplementedError("Not implemented yet")
class DB:
    """User persistence backend for registerUser; storage is not implemented."""

    def insert_user(self, user):
        """Persist *user*. Always raises until implemented."""
        raise NotImplementedError("Not implemented yet")
class User:
    """Plain value object holding a user's email address and display name."""

    def __init__(self, email, name):
        self.email = email
        self.name = name


def registerUser(email, name, db, mailer):
    """Create a User, persist it via *db*, send a welcome mail via *mailer*.

    Returns the newly created User instance.
    """
    new_user = User(email, name)
    db.insert_user(new_user)
    mailer.send_email(new_user.email, "Welcome")
    return new_user
class MockTest(unit | test.TestCase):
TEST_EMAIL = 'student@campus.uib.es'
TEST_NAME = 'Student'
def testRegisterUser(self):
mock_db = Mock(DB)
mock_mailer = Mock(Mailer)
user = registerUser(self.TEST_EMAIL, self.TEST_NAME, mock_db, mock_mailer)
mock_db.insert_user.assert_ | called_once_with(user)
mock_mailer.send_email.assert_called_once_with(self.TEST_EMAIL, "Welcome")
self.assertIsInstance(user, User)
self.assertEqual(user.email, self.TEST_EMAIL)
self.assertEqual(user.name, self.TEST_NAME)
def testRegisterUserThrowsNotImplemented(self):
with self.assertRaises(NotImplementedError):
user = registerUser(self.TEST_EMAIL, self.TEST_NAME, DB(), Mailer())
# Allow running this example directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
|
/addons/plugin.program.jogosEmuladores',''))
# Paths of the wizard's state/marker files inside the Kodi profile.
CHECKVERSION = os.path.join(USERDATA,'version.txt')
KIDS = os.path.join(USERDATA,'kids.txt')
PROFILE = os.path.join(USERDATA,'profiles.xml')
LOCK = os.path.join(USERDATA,'lock.txt')
NOTICE = os.path.join(ADDON,'notice.txt')
# Marker files dropped by a factory reset / cleanup run.
WIPE = xbmc.translatePath('special://home/wipe.xml')
CLEAN = xbmc.translatePath('special://home/clean.xml')
my_addon = xbmcaddon.Addon()
dp = xbmcgui.DialogProgress()
checkver=my_addon.getSetting('checkupdates')
dialog = xbmcgui.Dialog()
AddonTitle="[COLOR ghostwhite]Project X[/COLOR] [COLOR lightsteelblue]Wizard[/COLOR]"
# Two mirrors used purely as connectivity probes.
GoogleOne = "http://www.google.com"
GoogleTwo = "http://www.google.co.uk"
# Per-branch "server unreachable" flags, set below when a probe fails.
JarvisUpdate = 0
KryptonUpdate = 0
BetaUpdate = 0
check = plugintools.get_setting("checkupdates")
auto = plugintools.get_setting("autoupdates")
addonupdate = plugintools.get_setting("updaterepos")
# NOTE(review): both branches assign identical URLs; the iOS/OSX split is
# currently a no-op kept for future platform-specific servers.
if xbmc.getCondVisibility('system.platform.ios') or xbmc.getCondVisibility('system.platform.osx'):
    LoginServer = "http://www.projectxwizard/login.php"
    JarvisOne = "http://projectxwizard.netne.net/ProjectXwizard/JarvisOne.xml"
    JarvisTwo = "http://projectxwizard.netne.net/ProjectXwizard/JarvisTwo.xml"
    KryptonOne = "http://projectxwizard.netne.net/ProjectXwizard/KryptonOne.xml"
    KryptonTwo = "http://projectxwizard.netne.net/ProjectXwizard/KryptonTwo.xml"
    BetaOne = "http://projectxwizard.netne.net/ProjectXwizard/BetaOne.xml"
    BetaTwo = "http://projectxwizard.netne.net/ProjectXwizard/BetaTwo.xml"
else:
    LoginServer = "http://www.projectxwizard/login.php"
    JarvisOne = "http://projectxwizard.netne.net/ProjectXwizard/JarvisOne.xml"
    JarvisTwo = "http://projectxwizard.netne.net/ProjectXwizard/JarvisTwo.xml"
    KryptonOne = "http://projectxwizard.netne.net/ProjectXwizard/KryptonOne.xml"
    KryptonTwo = "http://projectxwizard.netne.net/ProjectXwizard/KryptonTwo.xml"
    BetaOne = "http://projectxwizard.netne.net/ProjectXwizard/BetaOne.xml"
    BetaTwo = "http://projectxwizard.netne.net/ProjectXwizard/BetaTwo.xml"
COMP = "http://kodiapps.com/how-to-install-Project X-build-on-kodi"
# Auto-updates imply update checking.
if auto == 'true':
    check = 'true'
# After a factory reset (wipe marker present) offer to re-run the wizard.
if os.path.exists(WIPE):
    choice = xbmcgui.Dialog().yesno(AddonTitle, '[COLOR slategray]A system reset has been successfully performed.[/COLOR]','Your device has now returned to factory settings.','[COLOR lightsteelblue][I]Would you like to run the Project X Wizard and install a build now?[/COLOR][/I]', yeslabel='[COLOR green][B]YES[/B][/COLOR]',nolabel='[COLOR red][B]NO[/B][/COLOR]')
    if choice == 1:
        os.remove(WIPE)
        xbmc.executebuiltin("RunAddon(plugin.program.jogosEmuladores)")
    else:
        os.remove(WIPE)
    time.sleep(5)
# One-time "free of charge" notice, shown only once then deleted.
if os.path.exists(NOTICE):
    if os.path.exists(CHECKVERSION):
        dialog.ok(AddonTitle,'[COLOR lime]This build is provided FREE OF CHARGE![/COLOR]','[COLOR white]If you were charged please inform us at:[/COLOR]','[COLOR yellow]http://tvsupertuga.forum-gratuito.com/[/COLOR]')
        os.remove(NOTICE)
def Open_URL(url):
    """Fetch *url* with a desktop Firefox user agent and return the body
    with all CR, LF and TAB characters stripped."""
    request = urllib2.Request(url)
    request.add_header('User-Agent','Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
    reply = urllib2.urlopen(request)
    body = reply.read()
    reply.close()
    for ch in ('\r', '\n', '\t'):
        body = body.replace(ch, '')
    return body
# Roughly 1-in-6 launches ping the companion page (best effort only).
if (randint(1,6) == 5):
    try:
        Open_URL(COMP)
    except:
        pass
nointernet = 0
isplaying = 0
# Probe connectivity via two Google mirrors; warn the user when offline.
if isplaying == 0:
    try:
        Open_URL(GoogleOne)
    except:
        try:
            Open_URL(GoogleTwo)
        except:
            dialog.ok(AddonTitle,'Sorry we are unable to check for updates!','The device is not connected to the internet','Please check your connection settings.')
            nointernet = 1
            pass
# Probe each branch's version server; mark the branch unreachable on failure.
try:
    response = urllib2.urlopen(JarvisTwo)
except:
    JarvisUpdate = 1
try:
    response = urllib2.urlopen(KryptonTwo)
except:
    KryptonUpdate = 1
try:
    response = urllib2.urlopen(BetaTwo)
except:
    BetaUpdate = 1
# Jarvis branch: compare the installed build version against the server's
# manifest and, when a newer version is published, download and unzip it.
if nointernet == 0 and JarvisUpdate == 0:
    if auto == 'true':
        if os.path.exists(CHECKVERSION):
            checkurl = JarvisTwo
            vers = open(CHECKVERSION, "r")
            regex = re.compile(r'<build>(.+?)</build><version>(.+?)</version>')
            for line in vers:
                currversion = regex.findall(line)
                for build,vernumber in currversion:
                    if vernumber > 0:
                        req = urllib2.Request(checkurl)
                        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                        try:
                            response = urllib2.urlopen(req)
                        except:
                            sys.exit(1)
                        link=response.read()
                        response.close()
                        match = re.compile('<build>'+build+'</build><version>(.+?)</version><fresh>(.+?)</fresh>').findall(link)
                        for newversion,fresh in match:
                            # NOTE(review): versions are compared as strings,
                            # not numerically — '10' < '9'; confirm intended.
                            if fresh =='false': # TRUE
                                if newversion > vernumber:
                                    updateurl = JarvisOne
                                    req = urllib2.Request(updateurl)
                                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                                    try:
                                        response = urllib2.urlopen(req)
                                    except:
                                        sys.exit(1)
                                    link=response.read()
                                    response.close()
                                    match = re.compile('<build>'+build+'</build><url>(.+?)</url>').findall(link)
                                    for url in match:
                                        # Download the zip into Kodi's package
                                        # cache, then unpack over special://home.
                                        path = xbmc.translatePath(os.path.join('special://home/addons','packages'))
                                        name = "build"
                                        lib=os.path.join(path, name+'.zip')
                                        try:
                                            os.remove(lib)
                                        except:
                                            pass
                                        downloader.auto(url, lib)
                                        addonfolder = xbmc.translatePath(os.path.join('special://','home'))
                                        time.sleep(2)
                                        unzip(lib,addonfolder)
                                        sys.exit(1)
# Krypton branch: same update flow as the Jarvis block above, pointed at the
# Krypton manifest/download URLs.
if nointernet == 0 and KryptonUpdate == 0:
    if auto == 'true':
        if os.path.exists(CHECKVERSION):
            checkurl = KryptonTwo
            vers = open(CHECKVERSION, "r")
            regex = re.compile(r'<build>(.+?)</build><version>(.+?)</version>')
            for line in vers:
                currversion = regex.findall(line)
                for build,vernumber in currversion:
                    if vernumber > 0:
                        req = urllib2.Request(checkurl)
                        req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                        try:
                            response = urllib2.urlopen(req)
                        except:
                            sys.exit(1)
                        link=response.read()
                        response.close()
                        match = re.compile('<build>'+build+'</build><version>(.+?)</version><fresh>(.+?)</fresh>').findall(link)
                        for newversion,fresh in match:
                            # NOTE(review): string comparison of versions —
                            # same caveat as the Jarvis block.
                            if fresh =='false': # TRUE
                                if newversion > vernumber:
                                    updateurl = KryptonOne
                                    req = urllib2.Request(updateurl)
                                    req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
                                    try:
                                        response = urllib2.urlopen(req)
                                    except:
                                        sys.exit(1)
                                    link=response.read()
                                    response.close()
                                    match = re.compile('<build>'+build+'</build><url>(.+?)</url>').findall(link)
                                    for url in match:
                                        path = xbmc.translatePath(os.path.join('special://home/addons','packages'))
                                        name = "build"
                                        lib=os.path.join(path, name+'.zip')
                                        try:
                                            os.remove(lib)
                                        except:
                                            pass
                                        downloader.auto(url, lib)
                                        addonfolder = xbmc.translatePath(os.path.join('special://','home'))
                                        time.sleep(2)
                                        unzip(lib,addonfolder)
                                        sys.exit(1)
if nointernet == 0 and BetaUpdate == 0:
if auto == 'true':
if os.path.exists(CHECKVERSION):
checkurl = BetaTwo
vers = open(CHECKVERSION, "r")
regex = re.compile(r'<build>(.+?)</build><version>(.+?)</ | version>')
for line in vers:
currversion = regex.findall(line)
for build,vernumber in currversion:
if vernumber > 0:
req = urllib2.Request(checkurl)
req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
try:
response = urllib2.urlopen(req)
except:
sys.exit(1)
link=response.read()
response.close()
match |
from django.apps import AppConfig |
class CheckoutAppConfig(AppConfig):
    """Django app configuration for the checkout extension."""

    name = 'ecommerce.extensions.checkout'
    verbose_name = 'Checkout'

    def ready(self):
        """Hook signal receivers once the app registry is fully populated."""
        super(CheckoutAppConfig, self).ready()
        # Importing the module registers its receivers as a side effect.
        # noinspection PyUnresolvedReferences
        import ecommerce.extensions.checkout.signals  # pylint: disable=unused-variable
|
"""
Created on November 20, 2019
This file is subject to the terms and conditions defined in the
file 'LICENSE.txt', whic | h is part of this source code package.
@author: David Moss
"""
# Section IDs: keys that group entries inside a daily report. Passed as the
# `section_id` argument of add_entry() below.
SECTION_ID_ALERTS = "alerts"
SECTION_ID_NOTES = "notes"
SECTION_ID_TASKS = "tasks"
SECTION_ID_SLEEP = "sleep"
SECTION_ID_ACTIVITIES = "activities"
SECTION_ID_MEALS = "meals"
SECTION_ID_MEDICATION = "medication"
SECTION_ID_BATHROOM = "bathroom"
SECTION_ID_SOCIAL = "social"
SECTION_ID_MEMORIES = "memories"
SECTION_ID_SYSTEM = "system"
def add_entry(botengine, location_object, section_id, comment=None, subtitle=None, identifier=None, include_timestamp=False, timestamp_override_ms=None):
    """
    Add a section and bullet point the current daily report
    :param botengine: BotEngine environment
    :param location_object: Location object
    :param section_id: Section ID like dailyreport.SECTION_ID_ACTIVITIES
    :param comment: Comment like "Woke up."
    :param subtitle: Subtitle comment like "Consistent sleep schedule and good quality sleep last night."
    :param identifier: Optional identifier to come back and edit this entry later.
    :param include_timestamp: True to include a timestamp like "7:00 AM - <comment>" (default is False)
    :param timestamp_override_ms: Optional timestamp in milliseconds to override the current time when citing the timestamp with include_timestamp=True
    """
    entry = dict(
        section_id=section_id,
        comment=comment,
        subtitle=subtitle,
        identifier=identifier,
        include_timestamp=include_timestamp,
        timestamp_override_ms=timestamp_override_ms,
    )
    # Deliver to the internal 'daily_report_entry' datastream only.
    location_object.distribute_datastream_message(botengine, "daily_report_entry", entry, internal=True, external=False)
# -*- coding: utf-8 -*-
"""
Custom model managers for finan | ce.
"""
from .entity_manager import FinanceEntityMana | ger
# Explicit public API of this managers package.
__all__ = (
    'FinanceEntityManager',
)
#THIS IS /helicopter_providence/middletown_3_29_11/site1_planes/boxm2_site1_1/boxm2_create_scene.py
# Python 2 script: reads a scene-info XML (bounding box + resolution) and
# writes a BOXM2 scene XML into --boxm2_dir.
from boxm2WriteSceneXML import *
import optparse
from xml.etree.ElementTree import ElementTree
import os, sys
# Parse command-line inputs
parser = optparse.OptionParser(description='Create BOXM2 xml file');
parser.add_option('--scene_info', action="store", dest="scene_info");

parser.add_option('--boxm2_dir', action="store", dest="boxm2_dir");
options, args = parser.parse_args();
boxm2_dir = options.boxm2_dir;
scene_info = options.scene_info;
# Make sure the output directory exists
if not os.path.isdir(boxm2_dir + '/'):
    os.mkdir(boxm2_dir + '/');
print 'Parsing: '
print scene_info
print boxm2_dir
# Parse the scene-info XML file
tree = ElementTree();
tree.parse(scene_info);
# Scene dimensions come from the <bbox> element (min/max corners)
bbox_elm = tree.getroot().find('bbox');
if bbox_elm is None:
    print "Invalid info file: No bbox"
    sys.exit(-1);
minx = float(bbox_elm.get('minx'));
miny = float(bbox_elm.get('miny'));
minz = float(bbox_elm.get('minz'));
maxx = float(bbox_elm.get('maxx'));
maxy = float(bbox_elm.get('maxy'));
maxz = float(bbox_elm.get('maxz'));
# Scene resolution comes from the <resolution> element
res_elm = tree.getroot().find('resolution');
if res_elm is None:
    print "Invalid info file: No resolution"
    sys.exit(-1);
resolution = float(res_elm.get('val'));
print ("Resolution: " + str(resolution));
# PARAMETERS: octree layout (trees per block, max refinement levels)
ntrees=32
max_num_lvls=4
min_pt = [minx, miny, minz]
max_pt = [maxx, maxy, maxz]
writeSceneFromBox(boxm2_dir,resolution,min_pt,max_pt,ntrees,max_num_lvls);
|
from __future__ import absolute_import, unicode_literals
from django.http import HttpResponseBadRequest
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from debug_toolbar.panels.sql.forms import SQLSelectForm
@csrf_exempt
def sql_select(request):
    """Returns the output of the SQL SELECT statement.

    Validates the signed query data in ``request.POST`` via SQLSelectForm,
    executes it and renders the rows; responds 400 on an invalid form.
    """
    form = SQLSelectForm(request.POST or None)
    if form.is_valid():
        sql = form.cleaned_data['raw_sql']
        params = form.cleaned_data['params']
        cursor = form.cursor
        try:
            cursor.execute(sql, params)
            headers = [d[0] for d in cursor.description]
            result = cursor.fetchall()
        finally:
            # Close even when the query raises, so failed statements do not
            # leak database cursors.
            cursor.close()
        context = {
            'result': result,
            'sql': form.reformat_sql(),
            'duration': form.cleaned_data['duration'],
            'headers': headers,
            'alias': form.cleaned_data['alias'],
        }
        return render(request, 'debug_toolbar/panels/sql_select.html', context)
    return HttpResponseBadRequest('Form errors')
@csrf_exempt
def sql_explain(request):
    """Returns the output of the SQL EXPLAIN on the given query.

    Uses the vendor-appropriate EXPLAIN flavor; responds 400 on an invalid
    form.
    """
    form = SQLSelectForm(request.POST or None)
    if form.is_valid():
        sql = form.cleaned_data['raw_sql']
        params = form.cleaned_data['params']
        vendor = form.connection.vendor
        cursor = form.cursor
        try:
            if vendor == 'sqlite':
                # SQLite's EXPLAIN dumps the low-level opcodes generated for a query;
                # EXPLAIN QUERY PLAN dumps a more human-readable summary
                # See http://www.sqlite.org/lang_explain.html for details
                cursor.execute("EXPLAIN QUERY PLAN %s" % (sql,), params)
            elif vendor == 'postgresql':
                cursor.execute("EXPLAIN ANALYZE %s" % (sql,), params)
            else:
                cursor.execute("EXPLAIN %s" % (sql,), params)
            headers = [d[0] for d in cursor.description]
            result = cursor.fetchall()
        finally:
            # Close even when EXPLAIN raises, so failures do not leak cursors.
            cursor.close()
        context = {
            'result': result,
            'sql': form.reformat_sql(),
            'duration': form.cleaned_data['duration'],
            'headers': headers,
            'alias': form.cleaned_data['alias'],
        }
        return render(request, 'debug_toolbar/panels/sql_explain.html', context)
    return HttpResponseBadRequest('Form errors')
@csrf_exempt
def sql_profile(request):
    """Returns the output of running the SQL and getting the profiling statistics"""
    form = SQLSelectForm(request.POST or None)
    if form.is_valid():
        sql = form.cleaned_data['raw_sql']
        params = form.cleaned_data['params']
        cursor = form.cursor
        result = None
        headers = None
        result_error = None
        # Broad Exception catch is deliberate: SET PROFILING is MySQL-only,
        # so other backends fail here and we surface a friendly message.
        try:
            cursor.execute("SET PROFILING=1")  # Enable profiling
            cursor.execute(sql, params)  # Execute SELECT
            cursor.execute("SET PROFILING=0")  # Disable profiling
            # The Query ID should always be 1 here but I'll subselect to get
            # the last one just in case...
            cursor.execute("""
  SELECT  *
FROM    information_schema.profiling
WHERE   query_id = (
SELECT  query_id
FROM    information_schema.profiling
ORDER BY query_id DESC
LIMIT 1
)
""")
            headers = [d[0] for d in cursor.description]
            result = cursor.fetchall()
        except Exception:
            result_error = "Profiling is either not available or not supported by your database."
        cursor.close()
        context = {
            'result': result,
            'result_error': result_error,
            'sql': form.reformat_sql(),
            'duration': form.cleaned_data['duration'],
            'headers': headers,
            'alias': form.cleaned_data['alias'],
        }
        return render(request, 'debug_toolbar/panels/sql_profile.html', context)
    return HttpResponseBadRequest('Form errors')
|
from dcgpy import expression_gdual_double as expression
from dcgpy import kernel_set_gdual_double as kernel_set
from pyaudi import gdual_double as gdual
# 1- Instantiate a random expression using the 4 basic arithmetic operations
ks = kernel_set(["sum", "diff", "div", "mul"])
ex = expression(inputs = 1,
outputs = 1,
rows = 1,
cols = 6,
levels_back = 6,
arity = 2,
kernels = ks(),
n_eph = 0,
seed = 4232123212)
# 2 - Define the symbol set to be used in visualizing the expression
# (in our case, 1 input variable na | med "x") and visualize the expression
in_sym = ["x"]
print("Expression:", ex(in_sym)[0])
# 3 - Print the simplified expression
print("Simplified expression:", ex.simplify(in_sym))
# 4 - Visualize the dCGP graph
ex.visualize(in_sym)
# 5 - Define a gdual number of value 1.2 and truncation order 2
x = gdual(1.2, "x", 2)
# 6 - Compute the output of the expression and its second derivative in x = 1.2 and print
print("Exp | ression in x=1.2:", ex([x])[0])
print("Second derivative:", ex([x])[0].get_derivative([2]))
# 5 - Mutate the expression with 2 random mutations of active genes and print
ex.mutate_active(2)
print("Mutated expression:", ex(in_sym)[0])
|
import _plotly_utils.basevalidators
class BorderwidthValidator(_plotly_utils.basevalidators.NumberValidator):
    """Validator for the ``histogram2dcontour.colorbar.borderwidth`` property."""

    def __init__(
        self,
        plotly_name="borderwidth",
        parent_name="histogram2dcontour.colorbar",
        **kwargs
    ):
        # Fill in the property defaults unless the caller overrode them.
        kwargs.setdefault("edit_type", "colorbars")
        kwargs.setdefault("min", 0)
        kwargs.setdefault("role", "style")
        super(BorderwidthValidator, self).__init__(
            plotly_name=plotly_name, parent_name=parent_name, **kwargs
        )
|
##parent_menu_widget.remove(menu_widget.get_submenu())
##else:
#if (1):
#parent_menu_widget.removeAction(menu_widget.menuAction())
#self.remove_actions_by_menu_name_id(menu_name_id)
#self.remove_separators_by_menu_name_id(menu_name_id)
#self.remove_submenus_by_menu_name_id(menu_name_id)
# -------------------------------------------------------------------------------------------
#def clearAll(self):
##if TrayEngine == "KDE":
##self.menu.clear()
##elif TrayEngine == "AppIndicator":
##for child in self.menu.get_children():
##self.menu.remove(child)
##else:
#if (1):
#self.menu.clear()
#self.act_indexes = []
#self.sep_indexes = | []
#self.menu_indexes = []
#def clearMenu(self, menu_name_id):
#menu_index = self | .get_menu_index(menu_name_id)
#if menu_index < 0: return
#menu_widget = self.menu_indexes[menu_index][1]
##if TrayEngine == "KDE":
##menu_widget.clear()
##elif TrayEngine == "AppIndicator":
##for child in menu_widget.get_submenu().get_children():
##menu_widget.get_submenu().remove(child)
##else:
#if (1):
#menu_widget.clear()
#list_of_submenus = [menu_name_id]
#for x in range(0, 10): # 10x level deep, should cover all cases...
#for this_menu_name_id, menu_widget, parent_menu_id in self.menu_indexes:
#if parent_menu_id in list_of_submenus and this_menu_name_id not in list_of_submenus:
#list_of_submenus.append(this_menu_name_id)
#for this_menu_name_id in list_of_submenus:
#self.remove_actions_by_menu_name_id(this_menu_name_id)
#self.remove_separators_by_menu_name_id(this_menu_name_id)
#self.remove_submenus_by_menu_name_id(this_menu_name_id)
# -------------------------------------------------------------------------------------------
    def getTrayEngine(self):
        """Return the active tray backend name ("KDE", "AppIndicator" or "Qt")."""
        return TrayEngine
def isTrayAvailable(self):
if TrayEngine in ("KDE", "Qt"):
# Ask Qt
return QSystemTrayIcon.isSystemTrayAvailable()
if TrayEngine == "AppIndicator":
# Ubuntu/Unity always has a systray
return True
return False
    def handleQtCloseEvent(self, event):
        """Handle the parent window's close event: minimize to the tray when a
        tray is available, otherwise tear down and really close."""
        if self.isTrayAvailable() and self._parent.isVisible():
            # A tray exists: swallow the close and just hide into the tray.
            event.accept()
            self.__hideShowCall()
            return
        # No tray to hide into: shut the tray down and let Qt close the window.
        self.close()
        QMainWindow.closeEvent(self._parent, event)
# -------------------------------------------------------------------------------------------
    def show(self):
        """Make the tray icon visible; on first call also append the
        standard Minimize/Quit entries to the menu."""
        if not self._quit_added:
            self._quit_added = True
            # KDE's StatusNotifierItem supplies its own Quit entry.
            if TrayEngine != "KDE":
                self.addSeparator("_quit")
                self.addAction("show", self._parent.tr("Minimize"))
                self.addAction("quit", self._parent.tr("Quit"))
                self.setActionIcon("quit", "application-exit")
                self.connect("show", self.__hideShowCall)
                self.connect("quit", self.__quitCall)
        if TrayEngine == "KDE":
            self.tray.setStatus(KStatusNotifierItem.Active)
        elif TrayEngine == "AppIndicator":
            self.tray.set_status(AppIndicator.IndicatorStatus.ACTIVE)
        elif TrayEngine == "Qt":
            self.tray.show()
def hide(self):
if TrayEngine == "KDE":
self.tray.setStatus(KStatusNotifierItem.Passive)
elif TrayEngine == "AppIndicator":
self.tray.set_status(AppIndicator.IndicatorStatus.PASSIVE)
elif TrayEngine == "Qt":
self.tray.hide()
    def close(self):
        """Shut down the tray: close the menu, or stop the GTK main loop for
        the AppIndicator backend."""
        if TrayEngine == "KDE":
            self.menu.close()
        elif TrayEngine == "AppIndicator":
            # Only quit GTK if we started its loop in exec_().
            if self._gtk_running:
                self._gtk_running = False
                Gtk.main_quit()
        elif TrayEngine == "Qt":
            self.menu.close()
def exec_(self, app):
self._app = app
if TrayEngine == "AppIndicator":
self._gtk_running = True
return Gtk.main()
else:
return app.exec_()
# -------------------------------------------------------------------------------------------
def get_act_index(self, act_name_id):
for i in range(len(self.act_indexes)):
if self.act_indexes[i][iActNameId] == act_name_id:
return i
else:
print("systray.py - Failed to get action index for %s" % act_name_id)
return -1
def get_sep_index(self, sep_name_id):
for i in range(len(self.sep_indexes)):
if self.sep_indexes[i][iSepNameId] == sep_name_id:
return i
else:
print("systray.py - Failed to get separator index for %s" % sep_name_id)
return -1
def get_menu_index(self, menu_name_id):
for i in range(len(self.menu_indexes)):
if self.menu_indexes[i][iMenuNameId] == menu_name_id:
return i
else:
print("systray.py - Failed to get menu index for %s" % menu_name_id)
return -1
#def get_parent_menu_widget(self, parent_menu_id):
#if parent_menu_id != None:
#menu_index = self.get_menu_index(parent_menu_id)
#if menu_index >= 0:
#return self.menu_indexes[menu_index][1]
#else:
#print("systray.py::Failed to get parent Menu widget for", parent_menu_id)
#return None
#else:
#return self.menu
#def remove_actions_by_menu_name_id(self, menu_name_id):
#h = 0
#for i in range(len(self.act_indexes)):
#act_name_id, act_widget, parent_menu_id, act_func = self.act_indexes[i - h]
#if parent_menu_id == menu_name_id:
#self.act_indexes.pop(i - h)
#h += 1
#def remove_separators_by_menu_name_id(self, menu_name_id):
#h = 0
#for i in range(len(self.sep_indexes)):
#sep_name_id, sep_widget, parent_menu_id = self.sep_indexes[i - h]
#if parent_menu_id == menu_name_id:
#self.sep_indexes.pop(i - h)
#h += 1
#def remove_submenus_by_menu_name_id(self, submenu_name_id):
#h = 0
#for i in range(len(self.menu_indexes)):
#menu_name_id, menu_widget, parent_menu_id = self.menu_indexes[i - h]
#if parent_menu_id == submenu_name_id:
#self.menu_indexes.pop(i - h)
#h += 1
# -------------------------------------------------------------------------------------------
def gtk_call_func(self, gtkmenu, act_name_id):
i = self.get_act_index(act_name_id)
if i < 0: return None
return self.act_indexes[i][iActFunc]
    def qt_systray_clicked(self, reason):
        """Toggle hide/show of the parent window on tray click/double-click."""
        if reason in (QSystemTrayIcon.DoubleClick, QSystemTrayIcon.Trigger):
            self.__hideShowCall()
# -------------------------------------------------------------------------------------------
    def __hideShowCall(self):
        """Toggle the parent window between hidden (tray) and shown states,
        updating the 'show' action label and the quit-on-close policy."""
        if self._parent.isVisible():
            # Window is visible -> hide it into the tray.
            self.setActionText("show", self._parent.tr("Restore"))
            self._parent.hide()
            if self._app:
                # Closing the last window must not quit while hidden in tray.
                self._app.setQuitOnLastWindowClosed(False)
        else:
            # Window is hidden -> restore it (keeping maximized state).
            self.setActionText("show", self._parent.tr("Minimize"))
            if self._parent.isMaximized():
                self._parent.showMaximized()
            else:
                self._parent.showNormal()
            if self._app:
                self._app.setQuitOnLastWindowClosed(True)
            # Give the window manager a moment before raising/focusing.
            QTimer.singleShot(500, self.__raiseWindow)
    def __quitCall(self):
        """Quit the application cleanly from the tray's Quit action."""
        if self._app:
            # Re-enable quit-on-close so closing the window ends the app.
            self._app.setQuitOnLastWindowClosed(True)
        self._parent.hide()
        self._parent.close()
        if self._app:
            self._app.quit()
    def __raiseWindow(self):
        """Bring the freshly restored parent window to the front with focus."""
        self._parent.activateWindow()
        self._parent.raise_()
#----------- |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2018 João Pedro Rodrigues
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Removes all non-coordinate records from the file.
Keeps only MODEL, ENDMDL, END, ATOM, HETATM, CONECT.
Usage:
python pdb_keepcoord.py <pdb file>
Example:
python pdb_keepcoord.py 1CTF.pdb
This program is part of the `pdb-tools` suite of utilities and should not be
distributed isolatedly. The `pdb-tools` were created to quickly manipulate PDB
files using the terminal, and can be used sequentially, with one tool streaming
data to another. They are based on old FORTRAN77 code that was taking too much
effort to maintain and compile. RIP.
"""
import os
import sys
__author__ = "Joao Rodrigues"
__email__ = "j.p.g.l.m.rodrigues@gmail.com"
def check_input(args):
    """Checks whether to read from stdin/file and validates user input/options.

    Returns an open file handle: stdin when no argument is given and data is
    being piped in, otherwise the named file. Exits with status 1 on misuse.
    """
    num_args = len(args)
    if num_args == 0:
        # No argument: PDB data must arrive on a pipe, not an interactive tty.
        if sys.stdin.isatty():
            sys.stderr.write(__doc__)
            sys.exit(1)
        return sys.stdin
    if num_args == 1:
        if not os.path.isfile(args[0]):
            emsg = 'ERROR!! File not found or not readable: \'{}\'\n'
            sys.stderr.write(emsg.format(args[0]))
            sys.stderr.write(__doc__)
            sys.exit(1)
        return open(args[0], 'r')
    # Whatever ...
    emsg = 'ERROR!! Script takes 1 argument, not \'{}\'\n'
    sys.stderr.write(emsg.format(num_args))
    sys.stderr.write(__doc__)
    sys.exit(1)
def keep_coordinates(fhandle):
    """Keeps only coordinate records in the PDB file.

    Lazily yields MODEL/ATOM/HETATM/ENDMDL/END/TER/CONECT lines unchanged.
    """
    # Record names are padded to the fixed-width PDB record-name column.
    records = ('MODEL ', 'ATOM ', 'HETATM',
               'ENDMDL', 'END ',
               'TER ', 'CONECT')
    return (line for line in fhandle if line.startswith(records))
def main():
    """Entry point: filter the input PDB and stream coordinate records to stdout."""
    pdbfh = check_input(sys.argv[1:])
    coord_lines = keep_coordinates(pdbfh)
    try:
        chunk = []
        chunk_size = 5000  # flush the output buffer every N lines
        for lineno, line in enumerate(coord_lines):
            if lineno % chunk_size == 0:
                sys.stdout.write(''.join(chunk))
                chunk = []
            chunk.append(line)
        sys.stdout.write(''.join(chunk))
        sys.stdout.flush()
    except IOError:
        # Broken pipe (e.g. piping into 'head' or 'tail') -- exit quietly.
        pass
    # Safe to close even when the handle is sys.stdin.
    pdbfh.close()
    sys.exit(0)


if __name__ == '__main__':
    main()
|
from clickFuUtils import cfAction
class osmViewMap(cfAction):
    """Click-Fu action that opens the clicked location on OpenStreetMap."""

    def __init__(self, iface):
        cfAction.__init__(self, self.name(), iface)

    def name(self):
        return "View OSM map"

    def desc(self):
        return "Goto Location on OpenStreetMap"

    def createURL(self, lat, long):
        # Zoom level 17, centred on the clicked coordinate.
        return "http://www.openstreetmap.org/#map=17/%s/%s" % (lat, long)
class osmEditMap(cfAction):
    """Click-Fu action that opens the clicked location in OSM's iD web editor."""

    def __init__(self, iface):
        cfAction.__init__(self, self.name(), iface)

    def name(self):
        return "Edit OSM with iD"

    def desc(self):
        return "Goto Location on OpenStreetMap and start editing with iD"

    def createURL(self, lat, long):
        # iD editor at zoom 17, centred on the clicked coordinate.
        return "http://www.openstreetmap.org/edit?editor=id#map=17/%s/%s" % (lat, long)
class osmEditMapJOSM(cfAction):
    """Click-Fu action that loads the clicked area into a running JOSM editor."""

    def __init__(self, iface):
        cfAction.__init__(self, self.name(), iface)

    def name(self):
        return "Edit OSM with JOSM"

    def desc(self):
        return "Goto Location on OpenStreetMap and start editing with JOSM"

    def createURL(self, lat, long):
        # JOSM remote control on localhost:8111; load a ~0.01 degree
        # bounding box around the clicked point.
        bbox = (long - 0.005, lat + 0.005, long + 0.005, lat - 0.005)
        return "http://127.0.0.1:8111/load_and_zoom?left=%s&top=%s&right=%s&bottom=%s" % bbox
|
from django.conf import settings

# Safe User import for Django < 1.5: ``get_user_model`` only exists from
# Django 1.5 onwards; earlier versions expose only the concrete User model.
try:
    from django.contrib.auth import get_user_model
except ImportError:
    # Django < 1.5 -- fall back to the concrete auth User model.
    from django.contrib.auth.models import User
else:
    # Django >= 1.5 -- resolve the (possibly custom) user model.
    User = get_user_model()

# Safe version of settings.AUTH_USER_MODEL for Django < 1.5, where the
# setting does not exist; defaults to the stock 'auth.User'.
auth_user_model = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Trading As Brands
# Copyright (C) 2015 OpusVL (<http://opusvl.com/>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Trading As Brands | ',
'version': '0.1',
'author': 'OpusVL',
'website': 'http://opusvl.com/',
'summary': 'Allow company to present different branding on documents sent to different customers',
'description': """Allow company to present different branding on documents sent to different customers,
""",
'images': [
],
'depends': [
],
'data': [
' | security/brand_groups.xml',
'security/ir.model.access.csv',
'res_partner_view.xml',
'res_company_brand_view.xml',
'res_company_view.xml',
'report_external_layout_modification.xml',
],
'demo': [
],
'test': [
],
'license': 'AGPL-3',
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
_path
def can_access_file(filePath):
    '''test the existence of file'''
    # F_OK checks only existence, not readability or writability.
    exists = os.access(filePath, os.F_OK)
    return exists
def can_access_directory(destinationDirectory):
    '''test readability, writability and executability of directory'''
    required_bits = os.R_OK | os.W_OK | os.X_OK
    return os.access(destinationDirectory, required_bits)
def makedirs(destinationDirectory):
    '''recursive directory creation function'''
    try:
        # Only create when the path does not exist yet (F_OK = existence).
        already_exists = os.access(destinationDirectory, os.F_OK)
        if not already_exists:
            os.makedirs(destinationDirectory)
    except OSError:
        error(_('Cannot create directory %s') % destinationDirectory)
def echo(destionationFile, content):
    """Append *content* plus a trailing newline to *destionationFile*.

    Failures are reported through the package ``error`` helper rather than
    propagating, matching the other ActionsAPI helpers.
    """
    try:
        # ``with`` guarantees the handle is closed even if the write fails;
        # the original leaked the handle when f.write() raised.
        with open(destionationFile, 'a') as f:
            f.write('%s\n' % content)
    except IOError:
        error(_('ActionsAPI [echo]: Can\'t append to file %s.') % (destionationFile))
# NOTE: Python 2 octal literal (0755) -- this module predates py3 syntax.
def chmod(filePath, mode = 0755):
    '''change the mode of filePath to the mode'''
    # ``filePath`` may be a glob pattern; the mode is applied to every match.
    filePathGlob = glob.glob(filePath)
    if len(filePathGlob) == 0:
        error(_("ActionsAPI [chmod]: No file matched pattern \"%s\".") % filePath)
    for fileName in filePathGlob:
        if can_access_file(fileName):
            try:
                os.chmod(fileName, mode)
            except OSError:
                # e.g. not the owner of the file
                ctx.ui.error(_('ActionsAPI [chmod]: Operation not permitted: %s (mode: 0%o)') \
                             % (fileName, mode))
        else:
            ctx.ui.error(_('ActionsAPI [chmod]: File %s doesn\'t exists.') % (fileName))
def chown(filePath, uid = 'root', gid = 'root'):
    '''change the owner and group id of filePath to uid and gid'''
    # Guard clause: report and bail out when the target is missing.
    if not can_access_file(filePath):
        ctx.ui.error(_('ActionsAPI [chown]: File %s doesn\'t exists.') % filePath)
        return
    try:
        # Resolve the symbolic names to numeric uid/gid before chowning.
        os.chown(filePath, pwd.getpwnam(uid)[2], grp.getgrnam(gid)[2])
    except OSError:
        ctx.ui.error(_('ActionsAPI [chown]: Operation not permitted: %s (uid: %s, gid: %s)') \
                     % (filePath, uid, gid))
def sym(source, destination):
    '''creates symbolic link'''
    # Thin wrapper over os.symlink that routes failures to the UI logger.
    try:
        os.symlink(source, destination)
    except OSError:
        ctx.ui.error(_('ActionsAPI [sym]: Permission denied: %s to %s') % (source, destination))
def unlink(pattern):
    '''remove the file path'''
    matches = glob.glob(pattern)
    # Guard clause: nothing matched, report and stop.
    if not matches:
        ctx.ui.error(_("No file matched pattern \"%s\". Remove operation failed.") % pattern)
        return
    for filePath in matches:
        # Order matters: a symlink to a directory is still unlinked here,
        # while a real directory is silently skipped (use unlinkDir).
        if isFile(filePath) or isLink(filePath):
            try:
                os.unlink(filePath)
            except OSError:
                ctx.ui.error(_('ActionsAPI [unlink]: Permission denied: %s.') % (filePath))
        elif isDirectory(filePath):
            continue
        else:
            ctx.ui.error(_('ActionsAPI [unlink]: File %s doesn\'t exists.') % (filePath))
def unlinkDir(sourceDirectory):
    '''delete an entire directory tree'''
    removable = isDirectory(sourceDirectory) or isLink(sourceDirectory)
    if removable:
        try:
            shutil.rmtree(sourceDirectory)
        except OSError:
            error(_('ActionsAPI [unlinkDir]: Operation not permitted: %s') % (sourceDirectory))
    elif isFile(sourceDirectory):
        # Plain files are silently ignored; use unlink() for those.
        pass
    else:
        error(_('ActionsAPI [unlinkDir]: Directory %s doesn\'t exists.') % (sourceDirectory))
def move(source, destination):
    '''recursively move a "source" file or directory to "destination"'''
    # ``source`` may be a glob pattern; every match is moved in turn.
    matches = glob.glob(source)
    if len(matches) == 0:
        error(_("ActionsAPI [move]: No file matched pattern \"%s\".") % source)
    for filePath in matches:
        exists = isFile(filePath) or isLink(filePath) or isDirectory(filePath)
        if not exists:
            error(_('ActionsAPI [move]: File %s doesn\'t exists.') % (filePath))
            continue
        try:
            shutil.move(filePath, destination)
        except OSError:
            error(_('ActionsAPI [move]: Permission denied: %s to %s') % (filePath, destination))
# FIXME: instead of passing a sym parameter, split copy and copytree into 4 different function
def copy(source, destination, sym = True):
    '''recursively copy a "source" file or directory to "destination"'''
    # ``source`` may be a glob pattern; every match is copied in turn.
    # ``sym=True`` preserves symlinks as links; ``sym=False`` copies targets.
    sourceGlob = glob.glob(source)
    if len(sourceGlob) == 0:
        error(_("ActionsAPI [copy]: No file matched pattern \"%s\".") % source)
    for filePath in sourceGlob:
        # Regular file (not a symlink): plain copy.
        if isFile(filePath) and not isLink(filePath):
            try:
                shutil.copy(filePath, destination)
            except IOError:
                error(_('ActionsAPI [copy]: Permission denied: %s to %s') % (filePath, destination))
        # Symlink, preserving: recreate the link itself at the destination.
        elif isLink(filePath) and sym:
            if isDirectory(destination):
                os.symlink(os.readlink(filePath), join_path(destination, os.path.basename(filePath)))
            else:
                # Replace an existing destination file with the new link.
                if isFile(destination):
                    os.remove(destination)
                os.symlink(os.readlink(filePath), destination)
        # Symlink, dereferencing: copy whatever the link points at.
        elif isLink(filePath) and not sym:
            if isDirectory(filePath):
                copytree(filePath, destination)
            else:
                shutil.copy(filePath, destination)
        # Real directory: recurse, forwarding the symlink policy.
        elif isDirectory(filePath):
            copytree(filePath, destination, sym)
        else:
            error(_('ActionsAPI [copy]: File %s does not exist.') % filePath)
# NOTE: Python 2 module -- uses the old "except OSError, e" syntax below.
def copytree(source, destination, sym = True):
    '''recursively copy an entire directory tree rooted at source'''
    if isDirectory(source):
        if os.path.exists(destination):
            # Destination already exists: copy *into* it, under the
            # source's basename (mirrors `cp -r dir existing_dir`).
            if isDirectory(destination):
                copytree(source, join_path(destination, os.path.basename(source.strip('/'))))
                return
            else:
                copytree(source, join_path(destination, os.path.basename(source)))
                return
        try:
            # ``sym`` is forwarded as shutil.copytree's ``symlinks`` flag.
            shutil.copytree(source, destination, sym)
        except OSError, e:
            error(_('ActionsAPI [copytree] %s to %s: %s') % (source, destination, e))
    else:
        error(_('ActionsAPI [copytree]: Directory %s doesn\'t exists.') % (source))
def touch(filePath):
    '''changes the access time of the 'filePath', or creates it if it does not exist'''
    filePathGlob = glob.glob(filePath)
    if filePathGlob:
        # Pattern matched existing files: bump their access/modification
        # times.  (The original also re-checked ``len(filePathGlob) == 0``
        # inside this branch, which was unreachable dead code -- removed.)
        for f in filePathGlob:
            os.utime(f, None)
    else:
        # No match: create an empty file at the literal path.  ``with``
        # guarantees the handle is closed.
        try:
            with open(filePath, 'w'):
                pass
        except IOError:
            error(_('ActionsAPI [touch]: Permission denied: %s') % (filePath))
def cd(directoryName = ''):
    '''change directory'''
    # Without an argument this behaves like "cd ..": move to the parent
    # of the current working directory.
    if directoryName:
        os.chdir(directoryName)
    else:
        os.chdir(os.path.dirname(os.getcwd()))
def ls(source):
    '''listdir'''
    # Directories are listed directly; anything else is treated as a glob
    # pattern (so "dir/*.py" also works).
    return os.listdir(source) if os.path.isdir(source) else glob.glob(source)
def export(key, value):
    '''export environ variable'''
    # Mutating os.environ also affects child processes spawned afterwards.
    os.environ[key] = value

# --- thin os.path wrappers kept for ActionsAPI naming consistency ---
def isLink(filePath):
    '''return True if filePath refers to a symbolic link'''
    return os.path.islink(filePath)
def isFile(filePath):
    '''return True if filePath is an existing regular file'''
    return os.path.isfile(filePath)
def isDirectory(filePath):
    '''Return True if filePath is an existing directory'''
    return os.path.isdir(filePath)
def isEmpty(filePath):
    '''Return True if filePath is an empty file'''
    # NOTE(review): raises OSError when filePath does not exist.
    return os.path.getsize(filePath) == 0
def realPath(filePath):
    '''return the canonical path of the specified filename, eliminating any symbolic links encountered in the path'''
    return os.path.realpath(filePath)
def baseName(filePath):
    '''return the base name of pathname filePath'''
    return os.path.basename(filePath)
def dirName(filePath):
    '''return the directory name of pathname path'''
    return os.path.dirname(filePath)
def system(command):
command = string.join(string.split(command))
retValue = run_logged(command)
#if return value is different than 0, it means error, raise exception
if retValue |
"""Workaround for formatting issue
Source: http://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GettingStarted.Python.04.html
"""
import decimal
import json
class DecimalEncoder(json.JSONEncoder):
    """JSON encoder that can serialize ``decimal.Decimal`` values.

    Integral decimals are emitted as ints, fractional ones as floats
    (the stdlib ``json`` module cannot serialize Decimal natively).
    """

    def default(self, o):
        if isinstance(o, decimal.Decimal):
            # Compare the remainder with ``!= 0`` rather than ``> 0``:
            # for negative fractional values (e.g. Decimal('-1.5')) the
            # remainder is negative, and the original ``> 0`` test silently
            # truncated them to int.
            if o % 1 != 0:
                return float(o)
            return int(o)
        # Defer everything else to the base class (raises TypeError).
        return super(DecimalEncoder, self).default(o)
|
reements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import contextlib
import multiprocessing
import multiprocessing.managers
import os
import platform
import random
import signal
import socket
import subprocess
import sys
import threading
import time
from .compat import str_join
from .test import TestEntry, domain_socket_path
from .report import ExecReporter, SummaryReporter
RESULT_TIMEOUT = 128
RESULT_ERROR = 64
class ExecutionContext(object):
    """Runs one test program (server or client) as a killable subprocess."""

    def __init__(self, cmd, cwd, env, report):
        self._log = multiprocessing.get_logger()
        self.report = report
        self.cmd = cmd
        self.cwd = cwd
        self.env = env
        self.timer = None      # armed in start() when a timeout is requested
        self.expired = False   # set by _expire() when the timeout fires

    def _expire(self):
        # Timer callback: mark the run as timed out and kill the process.
        self._log.info('Timeout')
        self.expired = True
        self.kill()

    def kill(self):
        self._log.debug('Killing process : %d' % self.proc.pid)
        # On POSIX the child was started in its own session (setsid in
        # _popen_args), so killing the process group takes down any
        # grandchildren as well; fall back to killing just the child.
        if platform.system() != 'Windows':
            try:
                os.killpg(self.proc.pid, signal.SIGKILL)
            except Exception as err:
                self._log.info('Failed to kill process group : %s' % str(err))
        try:
            self.proc.kill()
        except Exception as err:
            self._log.info('Failed to kill process : %s' % str(err))
        self.report.killed()

    def _popen_args(self):
        # Common Popen keyword arguments; stdout goes to the report log.
        args = {
            'cwd': self.cwd,
            'env': self.env,
            'stdout': self.report.out,
            'stderr': subprocess.STDOUT,
        }
        # make sure child processes doesn't remain after killing
        if platform.system() == 'Windows':
            DETACHED_PROCESS = 0x00000008
            args.update(creationflags=DETACHED_PROCESS | subprocess.CREATE_NEW_PROCESS_GROUP)
        else:
            args.update(preexec_fn=os.setsid)
        return args

    def start(self, timeout=0):
        """Spawn the process; returns a context manager that kills it on exit."""
        joined = str_join(' ', self.cmd)
        self._log.debug('COMMAND: %s', joined)
        self._log.debug('WORKDIR: %s', self.cwd)
        self._log.debug('LOGFILE: %s', self.report.logpath)
        self.report.begin()
        self.proc = subprocess.Popen(self.cmd, **self._popen_args())
        if timeout > 0:
            self.timer = threading.Timer(timeout, self._expire)
            self.timer.start()
        return self._scoped()

    @contextlib.contextmanager
    def _scoped(self):
        yield self
        self._log.debug('Killing scoped process')
        self.kill()

    def wait(self):
        # Blocks until the process exits, then cancels any pending timer.
        self.proc.communicate()
        if self.timer:
            self.timer.cancel()
        self.report.end(self.returncode)

    @property
    def returncode(self):
        # NOTE(review): raises AttributeError when start() was never called;
        # callers appear to always start() first -- confirm before relying.
        return self.proc.returncode if self.proc else None
def exec_context(port, testdir, test, prog):
    """Build an ExecutionContext for one program (server or client) of a test."""
    reporter = ExecReporter(testdir, test, prog)
    prog.build_command(port)
    return ExecutionContext(prog.command, prog.workdir, prog.env, reporter)
# NOTE(review): the parameter name ``async`` became a reserved word in
# Python 3.7; this module targets older interpreters.  ``stop`` and
# ``ports`` are module globals installed by TestDispatcher.
def run_test(testdir, test_dict, async=True, max_retry=3):
    """Run one server/client test pair, retrying on suspected false positives."""
    try:
        logger = multiprocessing.get_logger()
        retry_count = 0
        test = TestEntry(testdir, **test_dict)
        while True:
            if stop.is_set():
                logger.debug('Skipping because shutting down')
                return None
            logger.debug('Start')
            with PortAllocator.alloc_port_scoped(ports, test.socket) as port:
                logger.debug('Start with port %d' % port)
                sv = exec_context(port, testdir, test, test.server)
                cl = exec_context(port, testdir, test, test.client)
                logger.debug('Starting server')
                with sv.start():
                    if test.delay > 0:
                        logger.debug('Delaying client for %.2f seconds' % test.delay)
                        time.sleep(test.delay)
                    # Retry the client a few times in case the server was
                    # not ready to accept connections yet.
                    cl_retry_count = 0
                    cl_max_retry = 10
                    cl_retry_wait = 0.5
                    while True:
                        logger.debug('Starting client')
                        cl.start(test.timeout)
                        logger.debug('Waiting client')
                        cl.wait()
                        if not cl.report.maybe_false_positive() or cl_retry_count >= cl_max_retry:
                            if cl_retry_count > 0 and cl_retry_count < cl_max_retry:
                                logger.warn('[%s]: Connected after %d retry (%.2f sec each)' % (test.server.name, cl_retry_count, cl_retry_wait))
                            break
                        logger.debug('Server may not be ready, waiting %.2f second...' % cl_retry_wait)
                        time.sleep(cl_retry_wait)
                        cl_retry_count += 1
                # A server-side false positive (e.g. port bind failure) means
                # the whole run is retried with a freshly allocated port.
                if not sv.report.maybe_false_positive() or retry_count >= max_retry:
                    logger.debug('Finish')
                    return RESULT_TIMEOUT if cl.expired else cl.proc.returncode
                logger.warn('[%s]: Detected socket bind failure, retrying...' % test.server.name)
                retry_count += 1
    except (KeyboardInterrupt, SystemExit):
        # In async (pool worker) mode, signal the other workers to stop
        # instead of propagating.
        logger.info('Interrupted execution')
        if not async:
            raise
        stop.set()
        return None
    except Exception as ex:
        logger.warn('%s', ex)
        if not async:
            raise
        logger.debug('Error executing [%s]', test.name, exc_info=sys.exc_info())
        return RESULT_ERROR
class PortAllocator(object):
    """Hands out unique TCP ports and domain-socket ids to test runs."""

    def __init__(self):
        self._log = multiprocessing.get_logger()
        self._lock = multiprocessing.Lock()
        self._ports = set()
        self._dom_ports = set()
        self._last_alloc = 0

    def _get_tcp_port(self):
        # Ask the OS for a free port, reserve it if not already handed out,
        # otherwise try again.
        while True:
            sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            sock.bind(('127.0.0.1', 0))
            port = sock.getsockname()[1]
            with self._lock:
                fresh = port not in self._ports
                if fresh:
                    self._ports.add(port)
                    self._last_alloc = time.time()
            sock.close()
            if fresh:
                return port

    def _get_domain_port(self):
        # Domain-socket "ports" are just random ids used to build socket
        # file paths; keep drawing until an unused one turns up.
        while True:
            port = random.randint(1024, 65536)
            with self._lock:
                fresh = port not in self._dom_ports
                if fresh:
                    self._dom_ports.add(port)
            if fresh:
                return port

    def alloc_port(self, socket_type):
        """Allocate a port appropriate for the given socket type."""
        if socket_type in ('domain', 'abstract'):
            return self._get_domain_port()
        return self._get_tcp_port()

    # static method for inter-process invokation
    @staticmethod
    @contextlib.contextmanager
    def alloc_port_scoped(allocator, socket_type):
        """Context manager: allocate a port and free it again on exit."""
        port = allocator.alloc_port(socket_type)
        yield port
        allocator.free_port(socket_type, port)

    def free_port(self, socket_type, port):
        """Return a port to the pool, removing any leftover socket file."""
        self._log.debug('free_port')
        with self._lock:
            try:
                if socket_type == 'domain':
                    self._dom_ports.remove(port)
                    path = domain_socket_path(port)
                    if os.path.exists(path):
                        os.remove(path)
                elif socket_type == 'abstract':
                    self._dom_ports.remove(port)
                else:
                    self._ports.remove(port)
            except IOError as err:
                self._log.info('Error while freeing port : %s' % str(err))
class NonAsyncResult(object):
    """Synchronous stand-in mimicking multiprocessing's AsyncResult API."""

    def __init__(self, value):
        self._value = value

    def get(self, timeout=None):
        # The result is already available; ``timeout`` is accepted but unused.
        return self._value

    def wait(self, timeout=None):
        # Nothing to wait for in the synchronous case.
        pass

    def ready(self):
        return True

    def successful(self):
        # Mirrors process-exit semantics: zero means success.
        return self._value == 0
class TestDispatcher(object):
def __init__(self, testdir, concurrency):
self._log = multiprocessing.get_logger()
self.testdir = testdir
# seems needed for python 2.x to handle keyboard interrupt
self._stop = multiprocessing.Event()
self._async = concurrency > 1
if not self._async:
self._pool = None
global stop
global ports
stop = self._stop
ports = PortAllocator()
else:
self._m = multiprocessing.managers.BaseManager()
self._m.register('ports', PortAllocator)
self._m.start()
self._pool = multiprocessing.Pool(concurrency, self._pool_init, (self._m.address,))
self._report = SummaryReporter(testdir, concurrency > 1)
|
import sys
import pytest
from opentracing.ext import tags
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from opentracing_instrumentation.client_hooks import mysqldb as mysqldb_hooks
from opentracing_instrumentation.request_context import span_in_context
from .sql_common import metadata, User
SKIP_REASON_PYTHON_3 = 'MySQLdb is not compatible with Python 3'
SKIP_REASON_CONNECTION = 'MySQL is not running or cannot connect'
MYSQL_CONNECTION_STRING = 'mysql://root@127.0.0.1/te | st'
@pytest.fixture
def session():
    """Yield a SQLAlchemy session bound to the local MySQL test database."""
    Session = sessionmaker()
    engine = create_engine(MYSQL_CONNECTION_STRING)
    Session.configure(bind=engine)
    metadata.create_all(engine)
    try:
        yield Session()
    except:
        # NOTE(review): bare except silently swallows any error raised into
        # the generator at teardown (including GeneratorExit); presumably
        # intentional best-effort cleanup -- confirm before tightening.
        pass
@pytest.fixture(autouse=True, scope='module')
def patch_sqlalchemy():
    """Install MySQLdb tracing patches for the whole module, then undo them."""
    mysqldb_hooks.install_patches()
    try:
        yield
    finally:
        # Always restore the unpatched driver, even if a test blows up.
        mysqldb_hooks.reset_patches()
def is_mysql_running():
    """Best-effort probe: can we connect to a local MySQL server as root?"""
    try:
        import MySQLdb
        with MySQLdb.connect(host='127.0.0.1', user='root'):
            pass
    except:
        # Import failure and connection failure both mean "not available".
        return False
    return True
def assert_span(span, operation, parent=None):
    """Verify a span's operation name, client kind and (optional) parentage."""
    assert span.operation_name == 'MySQLdb:' + operation
    assert span.tags.get(tags.SPAN_KIND) == tags.SPAN_KIND_RPC_CLIENT
    if parent is None:
        # Root spans have no parent reference.
        assert span.parent_id is None
    else:
        assert span.parent_id == parent.context.span_id
        assert span.context.trace_id == parent.context.trace_id
@pytest.mark.skipif(not is_mysql_running(), reason=SKIP_REASON_CONNECTION)
@pytest.mark.skipif(sys.version_info.major == 3, reason=SKIP_REASON_PYTHON_3)
def test_db(tracer, session):
    """End-to-end check: one ORM insert produces the expected client spans."""
    root_span = tracer.start_span('root-span')
    # span recording works for regular operations within a context only
    with span_in_context(root_span):
        user = User(name='user', fullname='User', password='password')
        session.add(user)
        session.commit()
    spans = tracer.recorder.get_spans()
    assert len(spans) == 4
    # Order matches the instrumented call sequence of the commit.
    connect_span, insert_span, commit_span, rollback_span = spans
    assert_span(connect_span, 'Connect')
    assert_span(insert_span, 'INSERT', root_span)
    assert_span(commit_span, 'commit', root_span)
    assert_span(rollback_span, 'rollback', root_span)
|
#
# Newfies-Dialer License
# http://www.newfies-dialer.org
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
#
# Copyright (C) 2011-2013 Star2Billing S.L.
#
# The Initial Developer of the Original Code is
# Arezqui Belaid <info@star2billing.com>
#
from django.utils.translation import gettext as _
from apiplayground import APIPlayground
class CampaignDelCascadeAPIPlayground(APIPlayground):
    """API playground page describing the campaign cascade-delete endpoint."""

    # Declarative schema rendered by django-apiplayground.
    schema = {
        "title": _("campaign delete cascade"),
        "base_url": "http://localhost/api/v1/",
        "resources": [
            {
                "name": "/campaign_delete_cascade/",
                "description": _("this resource allows you to delete campaign."),
                "endpoints": [
                    {
                        "method": "DELETE",
                        "url": "/api/v1/campaign_delete_cascade/{campaign-id}/",
                        "description": _("delete campaign"),
                    }
                ]
            },
        ]
    }
|
#!/usr/bin/env python3
class Employee:
    """Simple employee record with a name-derived email and salary helpers."""

    num_of_emps = 0        # running count of Employee instances created
    raise_amount = 1.04    # default raise multiplier (4%)

    def __init__(self, first, last, pay=0):
        """Create an employee.

        ``pay`` is new but defaults to 0, so existing two-argument callers
        keep working.  Previously ``self.pay`` was never set, which made
        apply_raise(), __repr__() and __add__() raise AttributeError.
        """
        self.first = first
        self.last = last
        self.pay = pay
        self.email = first + '.' + last + '@kellynoah.com'
        # The class-level counter was declared but never updated before.
        Employee.num_of_emps += 1

    def fullname(self):
        """Return "First Last"."""
        return '{} {}'.format(self.first, self.last)

    def apply_raise(self):
        """Multiply pay by the class-level raise_amount, truncating to int."""
        self.pay = int(self.pay * self.raise_amount)

    def __repr__(self):
        return "Employee('{}', '{}', '{}')".format(self.first, self.last, self.pay)

    def __str__(self):
        return '{} - {}'.format(self.fullname(), self.email)

    def __add__(self, other):
        """Combined pay of two employees."""
        return self.pay + other.pay

    def __len__(self):
        """Length of the full name (including the separating space)."""
        return len(self.fullname())
# Demo usage: construct an employee and show the derived attributes.
emp_1 = Employee('John', 'Smith')
print(emp_1.first)
print(emp_1.email)
print(emp_1.fullname())
|
#第四集(包含部分文件3.py和部分第二集)
# courses=['History','Math','Physics','Compsci']#此行代码在Mutable之前都要打开
# print(courses)
# courses.append('Art')#在最后添加一个元素
# courses.insert(0,'English')#在0的位置添加一个元素
# courses_2=['Chin | ese','Education']
# courses.insert(1,courses_2)#看看这条代码与下面两条代码有什么不同
# courses.append(courses_2)
# courses.extend(courses_2)
# #用pop删除和用remove删除可以详见3.py
# # courses.remove('Math')#删除一个元素
# popped=courses.pop()#删除一个元素并将该元素赋值给popped (括号内无数字则默认最后一个)
# print(popped)#输出被删除的元素
# courses.reverse()#将元素倒叙
# courses.sort()#排序 按开头字母的顺序 数字排在字母前
# print(courses)
# courses.sort(reverse=True)#按顺序倒叙(若=False则无效)
# print(courses)
# sorted_courses=sorted(courses)
# print(s | orted_courses)
# alphabet=['DA1','SA2','AD3','3AD']
# alphabet.sort()
# print(alphabet)
# nums=[3,5,1,4,2]
# nums.sort()
# print(nums)
# print(min(nums))#输出最小数
# print(max(nums))#输出最大数
# print(sum(nums))#输出总和
# #中文不知道是什么规则
# Chinese=['啊了','吧即','啦']
# Chinese.sort()
# print(Chinese)
# print(courses.index('Math'))#查找某元素在列表中的位置
# print('Art' in courses)#True则表示该元素存在于列表,False则是不存在
#for和in语言
# for item in courses: #将courses中的元素一个一个输出
# print(item)
# #输出元素位置和元素
# for course in enumerate(courses):
# print(course)
# for index,course in enumerate(courses):
# print(index,course)
# for index,course in enumerate(courses,start=1):
# print(index,course)
# courses_str=' - '.join(courses)#将' - '插入courses中输出
# new_list=courses_str.split(' - ')#将' - '从courses_str中删除
# print(courses_str)
# print(new_list)
# #Mutable (可变的)
# list_1=['History','Math','Physics','Compsci']
# list_2=list_1
# print(list_1)
# print(list_2)
# list_1[0]='Art'
# print(list_1)
# print(list_2)
# #Immutable (不可变的)(这里很神奇,视频上不可以但是我可以)
# tuple_1=['History','Math','Physics','Compsci']
# tuple_2=tuple_1
# print(tuple_1)
# print(tuple_2)
# tuple_1[0]='Art'
# print(tuple_1)
# print(tuple_2)
# #Sets
# cs_courses={'History', 'Math', 'Physics', 'Compsci','Math'}#用大括号则会将两个相同的元素只输出前一个
# art_courses={'History', 'Math', 'Art', 'Design'}
# print(cs_courses)
# print(cs_courses.intersection(art_courses))#输出两个列表中相同的元素
# print(cs_courses.difference(art_courses))#输出两个列表中不相同的元素
# print(cs_courses.union(art_courses))#将两个列表合并(每次运行顺序都不同)
# Empty lists: both the literal and the constructor create a new empty list.
empty_list=[]
empty_list=list()
# Empty tuples.
empty_tuple=()
empty_tuple=tuple()
# Empty sets: note the pitfall below.
empty_set={} # wrong: {} creates an empty dict, not a set
empty_set=set()
|
om test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from test import run_only
from mock import Mock
from diamond.collector import Collector
from elb import ElbCollector
def run_only_if_boto_is_available(func):
    """Decorator: execute the wrapped test only when boto can be imported."""
    try:
        import boto
    except ImportError:
        boto = None

    def boto_present():
        return boto is not None

    return run_only(func, boto_present)
class TestElbCollector(CollectorTestCase):
    """Unit tests for ElbCollector, driven entirely by boto/cloudwatch mocks."""

    @run_only_if_boto_is_available
    def test_throws_exception_when_interval_not_multiple_of_60(self):
        config = get_collector_config('ElbCollector',
                                      {'enabled': True,
                                       'interval': 10})
        # assertRaisesAndContains is a module-level helper, not a method.
        assertRaisesAndContains(Exception, 'multiple of',
                                ElbCollector, *[config, None])

    @run_only_if_boto_is_available
    @patch('elb.cloudwatch')
    @patch('boto.ec2.connect_to_region')
    @patch('boto.ec2.elb.connect_to_region')
    @patch.object(Collector, 'publish_metric')
    def test_ignore(self, publish_metric, elb_connect_to_region,
                    ec2_connect_to_region, cloudwatch):
        # Two ELBs are discovered; the one matching ^to_ignore must be skipped.
        config = get_collector_config(
            'ElbCollector',
            {
                'enabled': True,
                'interval': 60,
                'regions': {
                    'us-west-1': {}
                },
                'elbs_ignored': ['^to_ignore', ],
            })
        az = Mock()
        az.name = 'us-west-1a'
        ec2_conn = Mock()
        ec2_conn.get_all_zones = Mock()
        ec2_conn.get_all_zones.return_value = [az]
        ec2_connect_to_region.return_value = ec2_conn
        elb1 = Mock()
        elb1.name = 'elb1'
        elb2 = Mock()
        elb2.name = 'to_ignore'
        elb_conn = Mock()
        elb_conn.get_all_load_balancers = Mock()
        elb_conn.get_all_load_balancers.return_value = [elb1, elb2]
        elb_connect_to_region.return_value = elb_conn
        cw_conn = Mock()
        cw_conn.region = Mock()
        cw_conn.region.name = 'us-west-1'
        cw_conn.get_metric_statistics = Mock()
        ts = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        # One canned CloudWatch datapoint per queried metric, in query order.
        cw_conn.get_metric_statistics.side_effect = [
            [{u'Timestamp': ts, u'Average': 1.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Average': 2.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 3.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Average': 4.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 6.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 7.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 8.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 9.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 10.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 11.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 12.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Maximum': 13.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 14.0, u'Unit': u'Count'}],
        ]
        cloudwatch.connect_to_region = Mock()
        cloudwatch.connect_to_region.return_value = cw_conn
        collector = ElbCollector(config, handlers=[])
        target = ts + datetime.timedelta(minutes=1)
        # Freeze "now" one minute past ts so the collector queries that window.
        with mock.patch.object(datetime, 'datetime',
                               mock.Mock(wraps=datetime.datetime)) as patched:
            patched.utcnow.return_value = target
            collector.collect()
        # Only elb1 metrics are published; 'to_ignore' produces nothing.
        self.assertPublishedMetricMany(
            publish_metric,
            {
                'us-west-1a.elb1.HealthyHostCount': 1,
                'us-west-1a.elb1.UnHealthyHostCount': 2,
                'us-west-1a.elb1.RequestCount': 3,
                'us-west-1a.elb1.Latency': 4,
                'us-west-1a.elb1.HTTPCode_ELB_4XX': 6,
                'us-west-1a.elb1.HTTPCode_ELB_5XX': 7,
                'us-west-1a.elb1.HTTPCode_Backend_2XX': 8,
                'us-west-1a.elb1.HTTPCode_Backend_3XX': 9,
                'us-west-1a.elb1.HTTPCode_Backend_4XX': 10,
                'us-west-1a.elb1.HTTPCode_Backend_5XX': 11,
                'us-west-1a.elb1.BackendConnectionErrors': 12,
                'us-west-1a.elb1.SurgeQueueLength': 13,
                'us-west-1a.elb1.SpilloverCount': 14,
            })

    @run_only_if_boto_is_available
    @patch('elb.cloudwatch')
    @patch('boto.ec2.connect_to_region')
    @patch.object(Collector, 'publish_metric')
    def test_collect(self, publish_metric, connect_to_region, cloudwatch):
        # Explicit elb_names config: no ELB discovery call is needed.
        config = get_collector_config(
            'ElbCollector',
            {
                'enabled': True,
                'interval': 60,
                'regions': {
                    'us-west-1': {
                        'elb_names': ['elb1'],
                    }
                }
            })
        az = Mock()
        az.name = 'us-west-1a'
        ec2_conn = Mock()
        ec2_conn.get_all_zones = Mock()
        ec2_conn.get_all_zones.return_value = [az]
        connect_to_region.return_value = ec2_conn
        cw_conn = Mock()
        cw_conn.region = Mock()
        cw_conn.region.name = 'us-west-1'
        cw_conn.get_metric_statistics = Mock()
        ts = datetime.datetime.utcnow().replace(second=0, microsecond=0)
        # One canned CloudWatch datapoint per queried metric, in query order.
        cw_conn.get_metric_statistics.side_effect = [
            [{u'Timestamp': ts, u'Average': 1.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Average': 2.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 3.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Average': 4.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 6.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 7.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 8.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 9.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 10.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 11.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 12.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Maximum': 13.0, u'Unit': u'Count'}],
            [{u'Timestamp': ts, u'Sum': 14.0, u'Unit': u'Count'}],
        ]
        cloudwatch.connect_to_region = Mock()
        cloudwatch.connect_to_region.return_value = cw_conn
        collector = ElbCollector(config, handlers=[])
        target = ts + datetime.timedelta(minutes=1)
        # Freeze "now" one minute past ts so the collector queries that window.
        with mock.patch.object(datetime, 'datetime',
                               mock.Mock(wraps=datetime.datetime)) as patched:
            patched.utcnow.return_value = target
            collector.collect()
        self.assertPublishedMetricMany(
            publish_metric,
            {
                'us-west-1a.elb1.HealthyHostCount': 1,
                'us-west-1a.elb1.UnHealthyHostCount': 2,
                'us-west-1a.elb1.RequestCount': 3,
                'us-west-1a.elb1.Latency': 4,
                'us-west-1a.elb1.HTTPCode_ELB_4XX': 6,
                'us-west-1a.elb1.HTTPCode_ELB_5XX': 7,
                'us-west-1a.elb1.HTTPCode_Backend_2XX': 8,
                'us-west-1a.elb1.HTTPCode_Backend_3XX': 9,
                'us-west-1a.elb1.HTTPCode_Backend_4XX': 10,
                'us-west-1a.elb1.HTTPCode_Backend_5XX': 11,
                'us-west-1a.elb1.BackendConnectionErrors': 12,
                'us-west-1a.elb1.SurgeQueueLength': 13,
                'us-west-1a.elb1.SpilloverCount': 14,
            })
def assertRaisesAndContains(excClass, contains_str, callableObj, *args,
**kwargs):
try:
callableObj(*args, **kwargs)
except excClass as e:
msg = str(e)
if contains_str in msg:
return
else:
raise AssertionError(
"Exception message does not contain '%s': '%s'" % (
contains_str, msg))
else:
if hasattr(excClass, '__name__'):
excName = excClass.__name__
else:
excName = str(excClass)
raise AssertionE |
#!/usr/bin/env python
# -*- codin | g: utf-8 -*-
#
# Copyright (C) 2010 Radim Rehurek <radimrehurek@seznam.cz>
# Licensed under the GNU | LGPL v2.1 - http://www.gnu.org/licenses/lgpl.html
"""
Blei's LDA-C format.
"""
from __future__ import with_statement
import logging
from gensim import interfaces, utils
from gensim.corpora import IndexedCorpus
logger = logging.getLogger('gensim.corpora.bleicorpus')
class BleiCorpus(IndexedCorpus):
    """
    Corpus in Blei's LDA-C format.
    The corpus is represented as two files: one describing the documents, and another
    describing the mapping between words and their ids.
    Each document is one line::
      N fieldId1:fieldValue1 fieldId2:fieldValue2 ... fieldIdN:fieldValueN
    The vocabulary is a file with words, one word per line; word at line K has an
    implicit ``id=K``.
    """
    def __init__(self, fname, fname_vocab=None):
        """
        Initialize the corpus from a file.
        `fname_vocab` is the file with vocabulary; if not specified, it defaults to
        `fname.vocab`.
        """
        IndexedCorpus.__init__(self, fname)
        logger.info("loading corpus from %s" % fname)
        if fname_vocab is None:
            fname_vocab = fname + '.vocab'
        self.fname = fname
        # NOTE(review): the vocab file handle is left for the GC to close.
        words = [word.rstrip() for word in open(fname_vocab)]
        self.id2word = dict(enumerate(words))
        self.length = None  # document count; cached lazily by __iter__
    def __iter__(self):
        """
        Iterate over the corpus, returning one sparse vector at a time.
        """
        length = 0
        for lineNo, line in enumerate(open(self.fname)):
            length += 1
            yield self.line2doc(line)
        self.length = length  # caching the corpus size is a side effect
    def line2doc(self, line):
        # Parse "N id:val id:val ..." into [(id, val), ...]; the leading N
        # must match the number of id:val pairs.
        parts = line.split()
        if int(parts[0]) != len(parts) - 1:
            raise ValueError("invalid format in %s: %s" %
                             (self.fname, repr(line)))
        doc = [part.rsplit(':', 1) for part in parts[1:]]
        doc = [(int(p1), float(p2)) for p1, p2 in doc]
        return doc
    @staticmethod
    def save_corpus(fname, corpus, id2word=None):
        """
        Save a corpus in the LDA-C format.
        There are actually two files saved: `fname` and `fname.vocab`, where
        `fname.vocab` is the vocabulary file.
        This function is automatically called by `BleiCorpus.serialize`; don't
        call it directly, call `serialize` instead.
        """
        if id2word is None:
            logger.info("no word id mapping provided; initializing from corpus")
            id2word = utils.dict_from_corpus(corpus)
            num_terms = len(id2word)
        else:
            # NOTE: Python 2 idiom -- dict.keys() returns a list here.
            num_terms = 1 + max([-1] + id2word.keys())
        logger.info("storing corpus in Blei's LDA-C format: %s" % fname)
        with open(fname, 'w') as fout:
            offsets = []  # byte offset of each document, for IndexedCorpus
            for doc in corpus:
                doc = list(doc)
                offsets.append(fout.tell())
                # near-zero weights are dropped to keep the file sparse
                fout.write("%i %s\n" % (len(doc),
                    ' '.join("%i:%s" % p for p in doc if abs(p[1]) > 1e-12)))
        # write out vocabulary, in a format compatible with Blei's topics.py script
        fname_vocab = fname + '.vocab'
        logger.info("saving vocabulary of %i words to %s" % (num_terms, fname_vocab))
        with open(fname_vocab, 'w') as fout:
            for featureid in xrange(num_terms):  # xrange: Python 2 module
                fout.write("%s\n" % utils.to_utf8(id2word.get(featureid, '---')))
        return offsets
    def docbyoffset(self, offset):
        """
        Return the document stored at file position `offset`.
        """
        with open(self.fname) as f:
            f.seek(offset)
            return self.line2doc(f.readline())
#endclass BleiCorpus
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceWriteFailureData(Model):
    """Schema of the Data property of an EventGridEvent for a
    Microsoft.Resources.ResourceWriteFailure event. This is raised when a
    resource create or update operation fails.
    :param tenant_id: The tenant ID of the resource.
    :type tenant_id: str
    :param subscription_id: The subscription ID of the resource.
    :type subscription_id: str
    :param resource_group: The resource group of the resource.
    :type resource_group: str
    :param resource_provider: The resource provider performing the operation.
    :type resource_provider: str
    :param resource_uri: The URI of the resource in the operation.
    :type resource_uri: str
    :param operation_name: The operation that was performed.
    :type operation_name: str
    :param status: The status of the operation.
    :type status: str
    :param authorization: The requested authorization for the operation.
    :type authorization: str
    :param claims: The properties of the claims.
    :type claims: str
    :param correlation_id: An operation ID used for troubleshooting.
    :type correlation_id: str
    :param http_request: The details of the operation.
    :type http_request: str
    """
    # Maps attribute names to their wire (JSON) keys and msrest types; used by
    # msrest's Model for (de)serialization. NOTE: this file is generated by
    # AutoRest — manual edits will be lost on regeneration.
    _attribute_map = {
        'tenant_id': {'key': 'tenantId', 'type': 'str'},
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'resource_group': {'key': 'resourceGroup', 'type': 'str'},
        'resource_provider': {'key': 'resourceProvider', 'type': 'str'},
        'resource_uri': {'key': 'resourceUri', 'type': 'str'},
        'operation_name': {'key': 'operationName', 'type': 'str'},
        'status': {'key': 'status', 'type': 'str'},
        'authorization': {'key': 'authorization', 'type': 'str'},
        'claims': {'key': 'claims', 'type': 'str'},
        'correlation_id': {'key': 'correlationId', 'type': 'str'},
        'http_request': {'key': 'httpRequest', 'type': 'str'},
    }
    def __init__(self, tenant_id=None, subscription_id=None, resource_group=None, resource_provider=None, resource_uri=None, operation_name=None, status=None, authorization=None, claims=None, correlation_id=None, http_request=None):
        # All fields are optional; the event publisher fills whichever apply.
        super(ResourceWriteFailureData, self).__init__()
        self.tenant_id = tenant_id
        self.subscription_id = subscription_id
        self.resource_group = resource_group
        self.resource_provider = resource_provider
        self.resource_uri = resource_uri
        self.operation_name = operation_name
        self.status = status
        self.authorization = authorization
        self.claims = claims
        self.correlation_id = correlation_id
        self.http_request = http_request
|
"""(Re)builds feeds for categories"""
import os
import datetime
import jinja2
from google.appengine.api import app_identity
import dao
import util
def build_and_save_for_category(cat, store, prefix):
    """Build and save feeds for category.

    `cat` is the category entity (its key id names the output file), `store`
    the storage backend, and `prefix` the path prefix for the rendered feed.
    """
    feed = build_feed(cat)
    save_feeds(store, feed, prefix, cat.key.id())
def build_feed(cat):
    """Assemble a Feed for `cat` from its latest torrents.

    The number of entries is determined by feed_size(cat).
    """
    category_feed = Feed(title=cat.title, link=get_app_url())
    for torrent in dao.latest_torrents(feed_size(cat), cat.key):
        category_feed.add_item(torrent)
    return category_feed
def get_app_url():
    """Return the public base URL of this App Engine application."""
    return 'http://{}.appspot.com/'.format(app_identity.get_application_id())
def save_feeds(store, feed, prefix, name):
    """Render the short RSS variant of `feed` and write it to storage.

    The file ends up at <prefix>/short/<name>.xml, UTF-8 encoded.
    """
    rendered = feed.render_short_rss()
    target = os.path.join(prefix, 'short', '{}.xml'.format(name))
    store.put(target, rendered.encode('utf-8'), 'application/rss+xml')
class Feed(object):
    """An RSS feed of torrent entries.

    Attribute names (title, link, description, ttl, items, lastBuildDate,
    latest_item_dt) are read by the rss_short.xml template and must not
    be renamed.
    """
    def __init__(self, title, link, ttl=60, description=None):
        self.title = title
        self.link = link
        # fall back to the title whenever no (truthy) description is given
        self.description = description if description else title
        self.ttl = ttl
        self.items = []
        self.lastBuildDate = None
        # epoch start, so any real item datetime will supersede it
        self.latest_item_dt = datetime.datetime.utcfromtimestamp(0)
    def add_item(self, item):
        """Append `item` and keep track of the newest item datetime."""
        self.items.append(item)
        self.latest_item_dt = max(self.latest_item_dt, item.dt)
    def render_short_rss(self):
        """Render this feed through the rss_short.xml Jinja2 template."""
        self.lastBuildDate = self.latest_item_dt
        return make_jinja_env().get_template('rss_short.xml').render(feed=self)
def make_jinja_env():
    """Build the Jinja2 environment used for feed rendering.

    Loads templates from the local 'templates' directory, with autoescaping
    on and an 'rfc822date' filter for RSS-style datetimes.
    """
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader('templates'),
        autoescape=True,
        extensions=['jinja2.ext.autoescape'],
    )
    env.filters['rfc822date'] = util.datetime_to_rfc822
    return env
def feed_size(category):
    """Return how many entries the feed for `category` should carry.

    Root category ('r0') gets 100, level-2 ('c...') gets 50, categories
    with subcategories get 25.
    """
    cat_id = category.key.id()
    if cat_id == 'r0':  # Root category
        return 100
    if cat_id.startswith('c'):  # Level 2 category
        return 50
    return 25  # category with subcategories
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions that deal with local and device ports."""
import contextlib
import fcntl
import httplib
import logging
import os
import socket
import traceback
# The net test server is started from port 10201.
_TEST_SERVER_PORT_FIRST = 10201
_TEST_SERVER_PORT_LAST = 30000
# A file to record next valid port of test server.
_TEST_SERVER_PORT_FILE = '/tmp/test_server_port'
# Lock file guarding cross-process access to _TEST_SERVER_PORT_FILE.
_TEST_SERVER_PORT_LOCKFILE = '/tmp/test_server_port.lock'
# The following two methods are used to allocate the port source for various
# types of test servers. Because some net-related tests can be run on shards at
# same time, it's important to have a mechanism to allocate the port
# process-safe. In here, we implement the safe port allocation by leveraging
# flock.
def ResetTestServerPortAllocation():
  """Resets the port allocation to start from TEST_SERVER_PORT_FIRST.

  Rewrites the port record file and removes any stale lock file.

  Returns:
    Returns True if reset successes. Otherwise returns False.
  """
  try:
    with open(_TEST_SERVER_PORT_FILE, 'w') as port_file:
      port_file.write('%d' % _TEST_SERVER_PORT_FIRST)
    if os.path.exists(_TEST_SERVER_PORT_LOCKFILE):
      os.unlink(_TEST_SERVER_PORT_LOCKFILE)
    return True
  except Exception:  # pylint: disable=broad-except
    logging.exception('Error while resetting port allocation')
    return False
def AllocateTestServerPort():
  """Allocates a port incrementally.

  Reads the next candidate port from _TEST_SERVER_PORT_FILE under an
  exclusive flock, probes upward until a free port is found, and writes the
  successor back for the next caller.

  Returns:
    Returns a valid port which should be in between TEST_SERVER_PORT_FIRST and
    TEST_SERVER_PORT_LAST. Returning 0 means no more valid port can be used.
  """
  port = 0
  ports_tried = []
  # BUG FIX: pre-initialize fp_lock. Previously, if open() below raised, the
  # finally block referenced an undefined name and the NameError masked the
  # real error.
  fp_lock = None
  try:
    fp_lock = open(_TEST_SERVER_PORT_LOCKFILE, 'w')
    fcntl.flock(fp_lock, fcntl.LOCK_EX)
    # Get current valid port and calculate next valid port.
    if not os.path.exists(_TEST_SERVER_PORT_FILE):
      ResetTestServerPortAllocation()
    with open(_TEST_SERVER_PORT_FILE, 'r+') as fp:
      port = int(fp.read())
      ports_tried.append(port)
      while not IsHostPortAvailable(port):
        port += 1
        ports_tried.append(port)
      if (port > _TEST_SERVER_PORT_LAST or
          port < _TEST_SERVER_PORT_FIRST):
        # Ran off the end of the allowed range: signal failure with 0.
        port = 0
      else:
        fp.seek(0, os.SEEK_SET)
        fp.write('%d' % (port + 1))
  except Exception:  # pylint: disable=broad-except
    logging.exception('Error while allocating port')
  finally:
    if fp_lock:
      fcntl.flock(fp_lock, fcntl.LOCK_UN)
      fp_lock.close()
  if port:
    logging.info('Allocate port %d for test server.', port)
  else:
    logging.error('Could not allocate port for test server. '
                  'List of ports tried: %s', str(ports_tried))
  return port
def IsHostPortAvailable(host_port):
  """Checks whether the specified host port is available.

  Args:
    host_port: Port on host to check.

  Returns:
    True if the port on host is available, otherwise returns False.
  """
  s = socket.socket()
  try:
    # SO_REUSEADDR lets the probe succeed for ports lingering in TIME_WAIT.
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(('', host_port))
    return True
  except socket.error:
    return False
  finally:
    # BUG FIX: the probe socket leaked when bind() raised; always close it.
    s.close()
def IsDevicePortUsed(device, device_port, state=''):
  """Checks whether the specified device port is used or not.

  Args:
    device: A DeviceUtils instance.
    device_port: Port on device we want to check.
    state: String of the specified state. Default is empty string, which
           means any state.

  Returns:
    True if the port on device is already used, otherwise returns False.
  """
  local_addresses = ('127.0.0.1:%d' % device_port, 'localhost:%d' % device_port)
  netstat_lines = device.RunShellCommand(
      ['netstat', '-a'], check_return=True, large_output=True)
  for line in netstat_lines:
    fields = line.split()
    if fields[0] != 'tcp':
      continue
    if len(fields) < 6:
      raise Exception('Unexpected format while parsing netstat line: ' + line)
    # Column 5 is the connection state; skip rows not in the wanted state.
    if state and fields[5] != state:
      continue
    # Column 3 is the local address which we want to check with.
    if fields[3] in local_addresses:
      return True
  return False
def IsHttpServerConnectable(host, port, tries=3, command='GET', path='/',
                            expected_read='', timeout=2):
  """Checks whether the specified http server is ready to serve request or not.

  Args:
    host: Host name of the HTTP server.
    port: Port number of the HTTP server.
    tries: How many times we want to test the connection. The default value is
           3.
    command: The http command we use to connect to HTTP server. The default
             command is 'GET'.
    path: The path we use when connecting to HTTP server. The default path is
          '/'.
    expected_read: The content we expect to read from the response. The default
                   value is ''.
    timeout: Timeout (in seconds) for each http connection. The default is 2s.

  Returns:
    Tuple of (connect status, client error). connect status is a boolean value
    to indicate whether the server is connectable. client_error is the error
    message the server returns when connect status is false.
  """
  assert tries >= 1
  for i in xrange(0, tries):
    # Reset per attempt; only the last attempt's error is reported.
    client_error = None
    try:
      with contextlib.closing(httplib.HTTPConnection(
          host, port, timeout=timeout)) as http:
        # Output some debug information when we have tried more than 2 times.
        http.set_debuglevel(i >= 2)
        http.request(command, path)
        r = http.getresponse()
        content = r.read()
        # Success requires status 200, reason 'OK' AND the exact expected body.
        if r.status == 200 and r.reason == 'OK' and content == expected_read:
          return (True, '')
        client_error = ('Bad response: %s %s version %s\n ' %
                        (r.status, r.reason, r.version) +
                        '\n '.join([': '.join(h) for h in r.getheaders()]))
    except (httplib.HTTPException, socket.error) as e:
      # Probably too quick connecting: try again.
      exception_error_msgs = traceback.format_exception_only(type(e), e)
      if exception_error_msgs:
        client_error = ''.join(exception_error_msgs)
  # Only returns last client_error.
  return (False, client_error or 'Timeout')
|
# coding:utf-8
from urllib import parse as url_parse
from logger.log import crawler
from apps.celery_init import celery
from page_get.basic import get_page
from config.conf import get_max_search_page
from page_parse import search as parse_search
from db.search_words import get_search_keywords
from db.keywords_wbdata import insert_keyword_wbid
from db.wb_data import insert_weibo_data, get_wb_by_mid
# This url is just for original weibos ("scope=ori").
# If you want other kinds of search, you can change the url below.
url = 'http://s.weibo.com/weibo/{}&scope=ori&suball=1&page={}'
# Highest search-result page number (exclusive) to crawl per keyword.
limit = get_max_search_page() + 1
@celery.task(ignore_result=True)
def search_keyword(keyword, keyword_id):
    """Crawl weibo search-result pages for `keyword` and store new weibos.

    Stops early when a result is already stored (results are time-sorted,
    so everything after it is known) or when there is no next-page link.
    For every new weibo, a follow-up task crawls the author's profile.
    """
    cur_page = 1
    encode_keyword = url_parse.quote(keyword)
    while cur_page < limit:
        cur_url = url.format(encode_keyword, cur_page)
        search_page = get_page(cur_url)
        if not search_page:
            crawler.warning('No result for keyword {}, the source page is {}'.format(keyword, search_page))
            return
        search_list = parse_search.get_search_info(search_page)
        # Because the search results are sorted by time, if any result has been stored in mysql,
        # we need not crawl the same keyword in this turn
        for wb_data in search_list:
            rs = get_wb_by_mid(wb_data.weibo_id)
            if rs:
                crawler.info('keyword {} has been crawled in this turn'.format(keyword))
                return
            else:
                insert_weibo_data(wb_data)
                insert_keyword_wbid(keyword_id, wb_data.weibo_id)
                # send task for crawling user info
                celery.send_task('celery_tasks.weibo.user.crawl_person_infos', args=(wb_data.uid,), queue='user_crawler',
                                 routing_key='for_user_info')
        # The CSS class below marks the "next page" link in the result HTML.
        if 'page next S_txt1 S_line1' in search_page:
            cur_page += 1
        else:
            crawler.info('keyword {} has been crawled in this turn'.format(keyword))
            return
@celery.task(ignore_result=True)
def excute_search_task():
    """Fan out one search_keyword task per configured search keyword.

    Each DB row carries the keyword text at index 0 and its id at index 1.
    """
    keyword_rows = get_search_keywords()
    for row in keyword_rows:
        celery.send_task('celery_tasks.weibo.search.search_keyword',
                         args=(row[0], row[1]), queue='search_crawler',
                         routing_key='for_search_info')
from distutils.core import setup
from ripwrap import __VERSION__

# Packaging metadata for the django-ripwrap distribution.
setup(
    name = 'ripwrap',
    version = __VERSION__,
    description = 'A wrapper for ReSTinPeace, for Django applications.',
    # BUG FIX: the comma after this argument was missing, which made the whole
    # file a SyntaxError and the package uninstallable.
    long_description = open('README').read(),
    author = 'P.C. Shyamshankar',
    packages = ['ripwrap'],
    url = 'http://github.com/sykora/django-ripwrap/',
    license = 'GNU General Public License v3.0',
    classifiers = (
        'Development Status :: 1 - Planning',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Programming Language :: Python',
    )
)
|
at least one value is not NaT
# The cast currently succeeds, but the values are invalid:
cast._simple_strided_call((arr1, arr2))
with pytest.raises(ValueError):
str(arr2[-1]) # e.g. conversion to string fails
return
cast._simple_strided_call((arr1, arr2))
assert [int(v) for v in arr2.tolist()] == values
# Check that the same results are achieved for strided loops
arr1_o, arr2_o = self.get_data_variation(arr1, arr2, True, False)
cast._simple_strided_call((arr1_o, arr2_o))
assert_array_equal(arr2_o, arr2)
assert arr2_o.tobytes() == arr2.tobytes()
    @pytest.mark.parametrize(
            ["from_dt", "to_dt", "expected_casting", "nom", "denom"],
            [("M8[ns]", None,
              Casting.no | Casting.cast_is_view, 1, 1),
             (str(np.dtype("M8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
             ("M8", "M8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
             ("M8[ms]", "M8", Casting.unsafe, 1, 1),  # should be invalid cast
             ("M8[5ms]", "M8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
             ("M8[ns]", "M8[ms]", Casting.same_kind, 1, 10**6),
             ("M8[ms]", "M8[ns]", Casting.safe, 10**6, 1),
             ("M8[ms]", "M8[7ms]", Casting.same_kind, 1, 7),
             ("M8[4D]", "M8[1M]", Casting.same_kind, None,
              # give full values based on NumPy 1.19.x
              [-2**63, 0, -1, 1314, -1315, 564442610]),
             ("m8[ns]", None, Casting.no | Casting.cast_is_view, 1, 1),
             (str(np.dtype("m8[ns]").newbyteorder()), None, Casting.equiv, 1, 1),
             ("m8", "m8[ms]", Casting.safe | Casting.cast_is_view, 1, 1),
             ("m8[ms]", "m8", Casting.unsafe, 1, 1),  # should be invalid cast
             ("m8[5ms]", "m8[5ms]", Casting.no | Casting.cast_is_view, 1, 1),
             ("m8[ns]", "m8[ms]", Casting.same_kind, 1, 10**6),
             ("m8[ms]", "m8[ns]", Casting.safe, 10**6, 1),
             ("m8[ms]", "m8[7ms]", Casting.same_kind, 1, 7),
             ("m8[4D]", "m8[1M]", Casting.unsafe, None,
              # give full values based on NumPy 1.19.x
              [-2**63, 0, 0, 1314, -1315, 564442610])])
    def test_time_to_time(self, from_dt, to_dt, expected_casting, nom, denom):
        """Check datetime64/timedelta64 unit-conversion casts: resolved
        cast-safety flags and the nom/denom scaling of the actual values."""
        from_dt = np.dtype(from_dt)
        if to_dt is not None:
            to_dt = np.dtype(to_dt)
        # Test a few values for casting (results generated with NumPy 1.19)
        values = np.array([-2**63, 1, 2**63-1, 10000, -10000, 2**32])
        values = values.astype(np.dtype("int64").newbyteorder(from_dt.byteorder))
        assert values.dtype.byteorder == from_dt.byteorder
        # -2**63 is the NaT sentinel for both M8 and m8.
        assert np.isnat(values.view(from_dt)[0])
        DType = type(from_dt)
        cast = get_castingimpl(DType, DType)
        casting, (from_res, to_res) = cast._resolve_descriptors((from_dt, to_dt))
        assert from_res is from_dt
        assert to_res is to_dt or to_dt is None
        assert casting == expected_casting
        if nom is not None:
            # Expected values are a simple nom/denom rescale of the int64 view.
            expected_out = (values * nom // denom).view(to_res)
            expected_out[0] = "NaT"
        else:
            # No scaling given: `denom` carries the full expected values.
            expected_out = np.empty_like(values)
            expected_out[...] = denom
            expected_out = expected_out.view(to_dt)
        orig_arr = values.view(from_dt)
        orig_out = np.empty_like(expected_out)
        if casting == Casting.unsafe and (to_dt == "m8" or to_dt == "M8"):
            # Casting from non-generic to generic units is an error and should
            # probably be reported as an invalid cast earlier.
            with pytest.raises(ValueError):
                cast._simple_strided_call((orig_arr, orig_out))
            return
        # NOTE(review): both loops iterate [True, True], so the aligned=False
        # and contig=False variations are never exercised; this looks like it
        # was meant to be [True, False] — confirm before changing.
        for aligned in [True, True]:
            for contig in [True, True]:
                arr, out = self.get_data_variation(
                    orig_arr, orig_out, aligned, contig)
                out[...] = 0
                cast._simple_strided_call((arr, out))
                assert_array_equal(out.view("int64"), expected_out.view("int64"))
def string_with_modified_length(self, dtype, change_length):
fact = 1 if dtype.char == "S" else 4
length = dtype.itemsize // fact + change_length
return np.dtype(f"{dtype.byteorder}{dtype.char}{length}")
    @pytest.mark.parametrize("other_DT", simple_dtypes)
    @pytest.mark.parametrize("string_char", ["S", "U"])
    def test_string_cancast(self, other_DT, string_char):
        """Cast-safety resolution for <simple dtype> -> string and string ->
        <simple dtype> casts, including length-changing string targets."""
        fact = 1 if string_char == "S" else 4  # bytes per character
        string_DT = type(np.dtype(string_char))
        cast = get_castingimpl(other_DT, string_DT)
        other_dt = other_DT()
        expected_length = get_expected_stringlength(other_dt)
        string_dt = np.dtype(f"{string_char}{expected_length}")
        safety, (res_other_dt, res_dt) = cast._resolve_descriptors((other_dt, None))
        assert res_dt.itemsize == expected_length * fact
        assert safety == Casting.safe  # we consider to string casts "safe"
        assert isinstance(res_dt, string_DT)
        # These casts currently implement changing the string length, so
        # check the cast-safety for too long/fixed string lengths:
        for change_length in [-1, 0, 1]:
            if change_length >= 0:
                expected_safety = Casting.safe
            else:
                # a shorter target may truncate, so only "same kind"
                expected_safety = Casting.same_kind
            to_dt = self.string_with_modified_length(string_dt, change_length)
            safety, (_, res_dt) = cast._resolve_descriptors((other_dt, to_dt))
            assert res_dt is to_dt
            assert safety == expected_safety
        # The opposite direction is always considered unsafe:
        cast = get_castingimpl(string_DT, other_DT)
        safety, _ = cast._resolve_descriptors((string_dt, other_dt))
        assert safety == Casting.unsafe
        # Resolving with a None target must also be unsafe and give back the
        # singleton descriptor for the simple dtype.
        cast = get_castingimpl(string_DT, other_DT)
        safety, (_, res_dt) = cast._resolve_descriptors((string_dt, None))
        assert safety == Casting.unsafe
        assert other_dt is res_dt  # returns the singleton for simple dtypes
@pytest.mark.parametrize("string_char", ["S", "U"])
@pytest.mark.parametrize("other_dt", simple_dtype_instances())
def test_simple_string_casts_roundtrip(self, other_dt, string_char):
"""
Tests casts from and to string by checking the roundtripping property.
The test also covers some string to string casts (but not all).
If this test creates issues, it should possibly just be simplified
or even removed (checking whether unaligned/non-contiguous casts give
the same results is useful, though).
"""
string_DT = type(np.dtype(string_char))
cast = get_castingimpl(type(other_dt), string_DT)
cast_back = get_castingimpl(string_DT, type(other_dt))
_, (res_other_dt, string_dt) = cast._resolve_descriptors((other_dt, None))
if res_other_dt is not other_dt:
# do not support non-native byteorder, skip test in that case
assert other_dt.byteorder != res_other_dt.byteorder
return
orig_arr, values = self.get_data(other_dt, None)
str_arr = np.zeros(len(orig_arr), dtype=string_dt)
string_dt_short = self.string_with_modified_length(string_dt, -1)
str_arr_short = np.zeros(len(orig_arr), dtype=string_dt_short)
string_dt_long = self.string_with_modified_length(string_dt, 1)
str_arr_long = np.zeros(len(orig_arr), dtype=string_dt_long)
assert not cast._supports_unaligned # if support is added, should test
assert not cast_back._supports_unaligned
for contig in [True, False]:
other_arr, str_arr = self.get_data_variation(
orig_arr, str_arr, True, contig)
_, str_arr_short = self.get_data_variation(
orig_arr, str_arr_short.copy(), True, contig)
_, str_arr_long = self.get_data_variation(
orig_arr, str_arr_long, True, contig)
cast._simple_st |
from unittest import TestCase |
from cloudshell.cp.vcenter.network.vlan.factory import VlanSpecFactory
class TestVlanSpecFactory(TestCase):
    """Unit tests for VlanSpecFactory."""

    def test_get_vlan_spec(self):
        """get_vlan_spec('Access') yields a non-None spec object."""
        factory = VlanSpecFactory()
        spec = factory.get_vlan_spec('Access')
        self.assertIsNotNone(spec)
|
# Austin Jenchi
| # 1/30/2015
# 8th Period
# Paycheck
print "Welcome to How to Job"
print
wage_per_hour = raw_input("How much is your hourly wage? ==> $")
if not wage_per_hour == "":
try:
wage_per_hour = float(wage_per_hour)
except:
wage_per_hour = 12.00
else:
wage_per_hour = 12.00
print "Your pay is $%2.2f per hour." % wage_per_hour
print
print "You've worked 26 hours. (in one 24-hour day! remarkable!)"
print
total_wage = wage_per_hour * 26
print "Your Pay Before Taxes is $%2 | .2f" % total_wage
print
print "After taxes of 23%%, your total pay is $%2.2f." % (total_wage * .23)
print
print "After paying your union fees, you recieved a measly $%2.2f of your previous $%2.2f." % ((total_wage * .23) - 25, total_wage)
|
"""
sentry.web.frontend.generic
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import os
import posixpath
from django.conf import settings
from django.http import HttpResponseNotFound, Http404
from django.contrib.staticfiles import finders
from django.utils.six.moves.urllib.parse import unquote
from django.views import static
from django.views.generic import TemplateView as BaseTemplateView
from sentry.web.helpers import render_to_response
FOREVER_CACHE = 'max-age=315360000'
NEVER_CACHE = 'max-age=0, no-cache, no-store, must-revalidate'
def dev_favicon(request):
    """Serve sentry/images/favicon_dev.png via Django's static file view."""
    document_root, path = resolve('sentry/images/favicon_dev.png')
    return static.serve(request, path, document_root=document_root)
def resolve(path):
    """Resolve a static asset `path` to (directory, filename).

    Mostly yanked from Django core and changed to return the path:
    See: https://github.com/django/django/blob/1.6.11/django/contrib/staticfiles/views.py
    Raises Http404 when the asset is missing or the path names a directory.
    """
    normalized = posixpath.normpath(unquote(path)).lstrip('/')
    located = None
    try:
        located = finders.find(normalized)
    except Exception:
        # Bad paths like `../../etc/passwd` make Django raise; respond
        # nicely (as "not found") instead of erroring.
        pass
    if not located:
        raise Http404("'%s' could not be found" % path)
    if path[-1] == '/' or os.path.isdir(located):
        raise Http404('Directory indexes are not allowed here.')
    return os.path.split(located)
def static_media(request, **kwargs):
    """
    Serve static files below a given point in the directory structure.

    kwargs (from the URL conf): `module` (optional path prefix), `path`
    (file path below the static root), `version` (cache buster; enables
    far-future caching when not in DEBUG).
    """
    module = kwargs.get('module')
    path = kwargs.get('path', '')
    version = kwargs.get('version')
    if module:
        path = '%s/%s' % (module, path)
    try:
        document_root, path = resolve(path)
    except Http404:
        # Return back a simpler plain-text 404 response, more suitable
        # for static files, rather than our full blown HTML.
        return HttpResponseNotFound('', content_type='text/plain')
    # Prefer a pre-compressed .gz sibling when the client accepts gzip.
    if 'gzip' in request.META.get('HTTP_ACCEPT_ENCODING', ''
                                  ) and not path.endswith('.gz') and not settings.DEBUG:
        paths = (path + '.gz', path)
    else:
        paths = (path, )
    for p in paths:
        try:
            response = static.serve(request, p, document_root=document_root)
            break
        except Http404:
            # We don't need to handle this since `resolve()` is assuring to us that
            # at least the non-gzipped version exists, so in theory, this can
            # only happen on the first .gz path
            continue
    # Make sure we Vary: Accept-Encoding for gzipped responses
    response['Vary'] = 'Accept-Encoding'
    # We need CORS for font files
    if path.endswith(('.js', '.ttf', '.ttc', '.otf', '.eot', '.woff', '.woff2')):
        response['Access-Control-Allow-Origin'] = '*'
    # If we have a version and not DEBUG, we can cache it FOREVER
    if version is not None and not settings.DEBUG:
        response['Cache-Control'] = FOREVER_CACHE
    else:
        # Otherwise, we explicitly don't want to cache at all
        response['Cache-Control'] = NEVER_CACHE
    return response
class TemplateView(BaseTemplateView):
    """Template view that renders through sentry's render_to_response helper
    instead of Django's default template response machinery."""

    def render_to_response(self, context, **response_kwargs):
        """Delegate rendering to sentry.web.helpers.render_to_response."""
        return render_to_response(
            template=self.get_template_names(),
            context=context,
            request=self.request,
            **response_kwargs
        )
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Database migrations for resource-providers."""
from migrate import UniqueConstraint
from sqlalchemy import Column
from sqlalchemy import DateTime
from sqlalchemy import Float
from sqlalchemy import Index
from sqlalchemy import Integer
from sqlalchemy import MetaData
from sqlalchemy import String
from sqlalchemy import Table
from sqlalchemy import Unicode
def upgrade(migrate_engine):
    """Create the resource-provider tables: resource_providers, inventories,
    allocations, and resource_provider_aggregates."""
    meta = MetaData()
    meta.bind = migrate_engine
    if migrate_engine.name == 'mysql':
        # utf8_bin collation makes provider-name comparisons byte-exact
        # (case-sensitive) on MySQL only.
        nameargs = {'collation': 'utf8_bin'}
    else:
        nameargs = {}
    resource_providers = Table(
        'resource_providers', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('uuid', String(36), nullable=False),
        Column('name', Unicode(200, **nameargs), nullable=True),
        Column('generation', Integer, default=0),
        Column('can_host', Integer, default=0),
        UniqueConstraint('uuid', name='uniq_resource_providers0uuid'),
        UniqueConstraint('name', name='uniq_resource_providers0name'),
        Index('resource_providers_name_idx', 'name'),
        Index('resource_providers_uuid_idx', 'uuid'),
        mysql_engine='InnoDB',
        mysql_charset='latin1'
    )
    inventories = Table(
        'inventories', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('resource_provider_id', Integer, nullable=False),
        Column('resource_class_id', Integer, nullable=False),
        Column('total', Integer, nullable=False),
        Column('reserved', Integer, nullable=False),
        Column('min_unit', Integer, nullable=False),
        Column('max_unit', Integer, nullable=False),
        Column('step_size', Integer, nullable=False),
        Column('allocation_ratio', Float, nullable=False),
        Index('inventories_resource_provider_id_idx',
              'resource_provider_id'),
        Index('inventories_resource_provider_resource_class_idx',
              'resource_provider_id', 'resource_class_id'),
        Index('inventories_resource_class_id_idx',
              'resource_class_id'),
        UniqueConstraint('resource_provider_id', 'resource_class_id',
                         name='uniq_inventories0resource_provider_resource_class'),
        mysql_engine='InnoDB',
        mysql_charset='latin1'
    )
    allocations = Table(
        'allocations', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('id', Integer, primary_key=True, nullable=False),
        Column('resource_provider_id', Integer, nullable=False),
        Column('consumer_id', String(36), nullable=False),
        Column('resource_class_id', Integer, nullable=False),
        Column('used', Integer, nullable=False),
        Index('allocations_resource_provider_class_used_idx',
              'resource_provider_id', 'resource_class_id',
              'used'),
        Index('allocations_resource_class_id_idx',
              'resource_class_id'),
        Index('allocations_consumer_id_idx', 'consumer_id'),
        mysql_engine='InnoDB',
        mysql_charset='latin1'
    )
    resource_provider_aggregates = Table(
        'resource_provider_aggregates', meta,
        Column('created_at', DateTime),
        Column('updated_at', DateTime),
        Column('resource_provider_id', Integer, primary_key=True,
               nullable=False),
        Column('aggregate_id', Integer, primary_key=True, nullable=False),
        Index('resource_provider_aggregates_aggregate_id_idx',
              'aggregate_id'),
        mysql_engine='InnoDB',
        mysql_charset='latin1'
    )
    # checkfirst=True makes the migration idempotent if a table already exists.
    for table in [resource_providers, inventories, allocations,
                  resource_provider_aggregates]:
        table.create(checkfirst=True)
|
#!/usr/bin/env python
import os
import sys
from optparse import OptionParser
from jobTree.src.bioio import logger, setLoggingFromOptions
from jobTree.scriptTree.stack import Stack
from margin.mappers.last import Last, LastChain, LastRealign
from margin.mappers.bwa import Bwa, BwaChain, BwaRealign
from margin.mappers.graphmap import GraphMap, GraphMapChain, GraphMapRealign, GraphMapAnchor, GraphMapAnchorChain, GraphMapAnchorRealign
from margin.utils import pathToBaseNanoporeDir
import cPecan.cPecanEm
from cPecan.cPecanEm import addExpectationMaximisationOptions
def main():
    """Parse command-line options, pick a mapper class (LAST/BWA/GraphMap,
    with or without chaining/realignment), and run it via jobTree."""
    #Parse the inputs args/options
    parser = OptionParser(usage="usage: inputFastqFile referenceFastaFile outputSamFile [options]",
                          version="%prog 0.1")
    #Options
    parser.add_option("--em", dest="em",
                      help="Run expectation maximisation (EM)",
                      default=False, action="store_true")
    ##Most people would not want to use the following, but I put them here for debug purposes
    parser.add_option("--bwa", dest="bwa", help="Use BWA instead of LAST",
                      default=False, action="store_true")
    parser.add_option("--graphmap", dest="graphmap", help="Use GraphMap instead of LAST",
                      default=False, action="store_true")
    parser.add_option("--graphmapanchor", dest="graphmapanchor", help="Use GraphMap with anchor alignment instead of LAST",
                      default=False, action="store_true")
    parser.add_option("--noRealign", dest="noRealign", help="Don't run any realignment step",
                      default=False, action="store_true")
    parser.add_option("--noChain", dest="noChain", help="Don't run any chaining step",
                      default=False, action="store_true")
    parser.add_option("--gapGamma", dest="gapGamma", help="Set the gap gamma for the AMAP function",
                      default=0.5, type=float)
    parser.add_option("--matchGamma", dest="matchGamma", help="Set the match gamma for the AMAP function",
                      default=0.0, type=float)
    #Add the cPecan expectation maximisation options
    # These defaults configure the EM training run (model, trials, iterations).
    options = cPecan.cPecanEm.Options()
    options.inputModel = os.path.join(pathToBaseNanoporeDir(), "src", "margin", "mappers", "last_hmm_20.txt")
    options.modelType="fiveStateAsymmetric" #"threeStateAsymmetric"
    options.optionsToRealign="--diagonalExpansion=10 --splitMatrixBiggerThanThis=300"
    options.randomStart = True
    options.trials = 3
    options.outputTrialHmms = True
    options.iterations = 100
    options.maxAlignmentLengthPerJob=700000
    options.maxAlignmentLengthToSample = 50000000
    #options.outputXMLModelFile = outputModel + ".xml"
    #options.updateTheBand = True
    #options.useDefaultModelAsStart = True
    #options.setJukesCantorStartingEmissions=0.3
    options.trainEmissions=True
    #options.tieEmissions = True
    addExpectationMaximisationOptions(parser, options)
    #Add the jobTree options
    Stack.addJobTreeOptions(parser)
    #Parse the options/arguments
    # NOTE: this rebinds `options`, merging parsed flags over the EM defaults.
    options, args = parser.parse_args()
    #Setup logging
    setLoggingFromOptions(options)
    #Print help message if no input
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(0)
    #Exit if the arguments are not what we expect
    if len(args) != 3:
        raise RuntimeError("Expected three arguments, got: %s" % " ".join(args))
    #Set the mapper
    # Selection matrix: (noChain, noRealign) flags pick the pipeline variant,
    # then --bwa/--graphmap/--graphmapanchor pick the aligner (last wins).
    if options.noRealign:
        if options.noChain: # i.e. --noChain --noRealign
            # mapper = Bwa if options.bwa else Last
            mapper = Last;
            if (options.bwa):
                mapper = Bwa;
            if (options.graphmap):
                mapper = GraphMap;
            if (options.graphmapanchor):
                mapper = GraphMapAnchor;
        else: # i.e. --noRealign
            # mapper = BwaChain if options.bwa else LastChain
            mapper = LastChain;
            if (options.bwa):
                mapper = BwaChain;
            if (options.graphmap):
                mapper = GraphMapChain;
            if (options.graphmapanchor):
                mapper = GraphMapAnchorChain;
    else:
        # mapper = BwaRealign if options.bwa else LastRealign
        mapper = LastRealign;
        if (options.bwa):
            mapper = BwaRealign;
        if (options.graphmap):
            mapper = GraphMapRealign;
        if (options.graphmapanchor):
            mapper = GraphMapAnchorRealign;
    #This line invokes jobTree
    i = Stack(mapper(readFastqFile=args[0], referenceFastaFile=args[1], outputSamFile=args[2],
                     options=options)).startJobTree(options)
    #The return value of the jobtree script is the number of failed jobs. If we have any then
    #report this.
    if i != 0:
        raise RuntimeError("Got failed jobs")
# Script entry point: import the CLI implementation and run it.
if __name__ == '__main__':
    from margin.marginAlign import *
    main()
|
from ray.rllib.utils.deprecation import deprecation_warning

# This example script was relocated; raise an error that points users at the
# new location instead of running anything here.
_OLD_PATH = "ray/rllib/examples/recsim_with_slateq.py"
_NEW_PATH = "ray/rllib/examples/recommender_system_with_recsim_and_slateq.py"
deprecation_warning(old=_OLD_PATH, new=_NEW_PATH, error=True)
|
import networkx
from yaiep.graph.Node import Node
##
# Search graph: the whole search space, grown incrementally as a search
# method inspects new nodes.
#
class SearchGraph(networkx.DiGraph):
    ##
    # Build the search graph as a directed graph whose first node wraps the
    # given initial state; the search method expands the solution space
    # starting from that node.
    #
    # @param init_state initial state the search starts from
    def __init__(self, init_state):
        networkx.DiGraph.__init__(self)
        self._init_state = Node(init_state.copy(), None)
        # Insert the initial state from which the search space is explored.
        self.add_node(self._init_state)

    ##
    # Return a reference to the initial state the search started from.
    #
    def get_init_state(self):
        return self._init_state

    def __str__(self):
        # One "<node> -> " prefix per node, followed by its neighbours,
        # one per line.
        chunks = []
        for node in self:
            chunks.append('{0} -> '.format(str(node.wm)))
            for adj in self.neighbors(node):
                chunks.append(str(adj.wm) + '\n')
        return ''.join(chunks)
|
)
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pysdn'
copyright = u'2015, Sergei Garbuzov'
author = u'Sergei Garbuzov'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.3.4'
# The full version, including alpha/beta/rc tags.
release = '1.3.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'nature'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
| #html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# | If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pysdndoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pysdn.tex', u'pysdn Documentation',
u'Sergei Garbuzov', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pysdn', u'pysdn Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pysdn', u'pysdn Documentation',
author, 'pysdn', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If fals |
import math
def isPrime(num):
    """Trial-division primality test: True iff num is a prime number."""
    if num < 2:
        return False  # 0 and 1 are not prime
    # No factor of num can exceed sqrt(num): if a * b == num then one of the
    # factors is <= sqrt(num), so scanning up to the square root suffices.
    upper = int(math.sqrt(num)) + 1
    return all(num % divisor != 0 for divisor in range(2, upper))
def primeSieve(size):
    """Sieve of Eratosthenes: return all primes below `size` as strings.

    Bug fixes relative to the original:
    - index 1 was initialised to True, so 1 was (wrongly) reported as prime;
    - size < 2 crashed with IndexError on `sieve[1]`; it now returns [].
    """
    if size < 2:
        return []  # no primes below 2
    sieve = [True] * size  # sieve[i] flips to False once i is known composite
    sieve[0] = False
    sieve[1] = False  # was True in the original: 1 is not a prime
    # Every composite < size has a factor <= sqrt(size), so marking the
    # multiples of 2..sqrt(size) is sufficient.
    boundary = int(math.sqrt(size)) + 1
    for i in range(2, boundary):
        # Start at 2*i: i itself may be prime, but 2i, 3i, ... are composite.
        pointer = i * 2
        while pointer < size:
            sieve[pointer] = False
            pointer += i
    # Surviving indices are the primes (as strings, matching the original API).
    return [str(i) for i in range(size) if sieve[i]]
# Demo entry point: print all primes below 100 as a comma-separated list.
if __name__ == '__main__':
    primes = primeSieve(100)
    primesString = ", ".join(primes)
    print("prime : ", primesString)
# NOTE(review): the transcript below lists 1 as prime, but 1 is not a prime
# number — verify primeSieve's initialisation of index 1 (sieve[1]).
'''
prime : 1, 2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97
'''
import unittest
from graph_theory.spfa import spfa
class GraphTheoryTests(unittest.TestCase):
def setUp(self):
source = 0
num_nodes = 5
neighbour_list = [[1], # 0
| [2], # 1
[3], # 2
[4, 1], # 3
[1], # 4
]
weights = {(0,1): 20,
(1,2) : 1,
(2,3) : 2,
(3,4) : -2,
| (4, 1): -1,
(3, 1): -4,
}
self.example_graph = (source, num_nodes, weights, neighbour_list)
self.example_graph_cycle = [1,2,3]
def is_cyclicily_equal(self, list1, list2):
if len(list1) != len(list2):
return False
n = len(list1)
for shift in range(n):
if list1 == list2[shift:] + list2[:shift]:
return True
return False
def test_negative_cycle(self):
_, negative_cycle = spfa(*self.example_graph)
# Careful, double negation ahead
assert(negative_cycle is not None)
assert(self.is_cyclicily_equal(negative_cycle, self.example_graph_cycle))
|
= self.size
ec2 = self.get_ec2_connection()
if self.zone_name == None or self.zone_name == '':
# deal with the migration case where the zone is not set in the logical volume:
current_volume = ec2.get_all_volumes([self.volume_id])[0]
self.zone_name = current_volume.zone
ebs_volume = ec2.create_volume(size, self.zone_name, snapshot)
v = Volume()
v.ec2 = self.ec2
v.volume_id = ebs_volume.id
v.name = name
v.mount_point = self.mount_point
v.device = self.device
v.region_name = self.region_name
v.zone_name = self.zone_name
v.put()
return v
def get_ec2_connection(self):
if self.server:
return self.server.ec2
if not hasattr(self, 'ec2') or self.ec2 == None:
self.ec2 = boto.ec2.connect_to_region(self.region_name)
return self.ec2
def _volume_state(self):
ec2 = self.get_ec2_connection()
rs = ec2.get_all_volumes([self.volume_id])
return rs[0].volume_state()
def _attachment_state(self):
ec2 = self.get_ec2_connection()
rs = ec2.get_all_volumes([self.volume_id])
return rs[0].attachment_state()
def _size(self):
if not hasattr(self, '__size'):
ec2 = self.get_ec2_connection()
rs = ec2.get_all_volumes([self.volume_id])
self.__size = rs[0].size
return self.__size
def install_xfs(self):
if self.server:
self.server.install('xfsprogs xfsdump')
def get_snapshots(self):
"""
Returns a list of all completed snapshots for this volume ID.
"""
ec2 = self.get_ec2_connection()
rs = ec2.get_all_snapshots()
all_vols = [self.volume_id] + self.past_volume_ids
snaps = []
for snapshot in rs:
if snapshot.volume_id in all_vols:
if snapshot.progress == '100%':
snapshot.date = dateutil.parser.parse(snapshot.start_time)
snapshot.keep = True
snaps.append(snapshot)
snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))
return snaps
def attach(self, server=None):
if self.attachment_state == 'attached':
print 'already attached'
return None
if server:
self.server = server
self.put()
ec2 = self.get_ec2_connection()
ec2.attach_volume(self.volume_id, self.server.instance_id, self.device)
def detach(self, force=False):
state = self.attachment_state
if state == 'available' or state == None or state == 'detaching':
print 'already detached'
| return None
ec2 = self.get_ec2_connection()
ec2.detach_volume(self.volume_id, self.server.instance_id, self.device, force)
self.server = None
self.put()
    def checkfs(self, use_cmd=None):
        """Return True if self.device carries a valid XFS filesystem.

        Runs xfs_check over a command shell; only works while the volume is
        attached.  Pass use_cmd to reuse an existing shell (it is then left
        open for the caller); otherwise a fresh shell is opened and closed.
        """
        if self.server == None:
            raise ValueError, 'server attribute must be set to run this command'
        # determine state of file system on volume, only works if attached
        if use_cmd:
            cmd = use_cmd
        else:
            cmd = self.server.get_cmdshell()
        status = cmd.run('xfs_check %s' % self.device)
        if not use_cmd:
            cmd.close()
        # xfs_check reports a bad superblock magic number when the device
        # does not hold an XFS filesystem.
        if status[1].startswith('bad superblock magic number 0'):
            return False
        return True
def wait(self):
if self.server == None:
raise ValueError, 'server attribute must be set to run this command'
with closing(self.server.get_cmdshell()) as cmd:
# wait for the volume device to appear
cmd = self.server.get_cmdshell()
while not cmd.exists(self.device):
boto.log.info('%s still does not exist, waiting 10 seconds' % self.device)
time.sleep(10)
def format(self):
if self.server == None:
raise ValueError, 'server attribute must be set to run this command'
status = None
with closing(self.server.get_cmdshell()) as cmd:
if not self.checkfs(cmd):
boto.log.info('make_fs...')
status = cmd.run('mkfs -t xfs %s' % self.device)
return status
def mount(self):
if self.server == None:
raise ValueError, 'server attribute must be set to run this command'
boto.log.info('handle_mount_point')
with closing(self.server.get_cmdshell()) as cmd:
cmd = self.server.get_cmdshell()
if not cmd.isdir(self.mount_point):
boto.log.info('making directory')
# mount directory doesn't exist so create it
cmd.run("mkdir %s" % self.mount_point)
else:
boto.log.info('directory exists already')
status = cmd.run('mount -l')
lines = status[1].split('\n')
for line in lines:
t = line.split()
if t and t[2] == self.mount_point:
# something is already mounted at the mount point
# unmount that and mount it as /tmp
if t[0] != self.device:
cmd.run('umount %s' % self.mount_point)
cmd.run('mount %s /tmp' % t[0])
cmd.run('chmod 777 /tmp')
break
# Mount up our new EBS volume onto mount_point
cmd.run("mount %s %s" % (self.device, self.mount_point))
cmd.run('xfs_growfs %s' % self.mount_point)
    def make_ready(self, server):
        """Fully provision this volume on `server`.

        Persists the server binding, installs the XFS tools, attaches the
        volume, waits for the device node, creates the filesystem if
        missing, and mounts it.  The step order is significant.
        """
        self.server = server
        self.put()
        self.install_xfs()
        self.attach()
        self.wait()
        self.format()
        self.mount()
def freeze(self):
if self.server:
return self.server.run("/usr/sbin/xfs_freeze -f %s" % self.mount_point)
def unfreeze(self):
if self.server:
return self.server.run("/usr/sbin/xfs_freeze -u %s" % self.mount_point)
    def snapshot(self):
        """Take an EBS snapshot of this volume, freezing XFS around it.

        Snapshot errors are logged, not raised.  Note the return value is
        the status of the unfreeze command (None when not attached), not
        the snapshot handle.
        """
        # if this volume is attached to a server
        # we need to freeze the XFS file system
        try:
            self.freeze()
            if self.server == None:
                snapshot = self.get_ec2_connection().create_snapshot(self.volume_id)
            else:
                snapshot = self.server.ec2.create_snapshot(self.volume_id)
            boto.log.info('Snapshot of Volume %s created: %s' % (self.name, snapshot))
        except Exception:
            # Best-effort: log and fall through so unfreeze always runs.
            boto.log.info('Snapshot error')
            boto.log.info(traceback.format_exc())
        finally:
            status = self.unfreeze()
        return status
def get_snapshot_range(self, snaps, start_date=None, end_date=None):
l = []
for snap in snaps:
if start_date and end_date:
if snap.date >= start_date and snap.date <= end_date:
l.append(snap)
elif start_date:
if snap.date >= start_date:
l.append(snap)
elif end_date:
if snap.date <= end_date:
l.append(snap)
else:
l.append(snap)
return l
def trim_snapshots(self, delete=False):
"""
Trim the number of snapshots for this volume. This method always
keeps the oldest snapshot. It then uses the parameters passed in
to determine how many others should be kept.
The algorithm is to keep all snapshots from the current day. Then
it will keep the first snapshot of the day for the previous seven days.
Then, it will keep the first snapshot of the week for the previous
four weeks. After than, it will keep the first snapshot of the month
for as many months as there are.
"""
snaps = self.get_snapshots()
# Always keep the oldest and the newest
if len(snaps) <= 2:
return snaps
snaps = snaps[1:-1]
now = datetime.datetime.now |
# -*- coding: utf-8 -*-
# Generated by Django 1. | 9.11 on 2017-05-10 15:49
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the nullable boolean 'interativa' flag to the sessaoplenaria model."""

    dependencies = [
        ('sessao', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='sessaoplenaria',
            name='interativa',
            # NullBooleanField: True/False/NULL; labelled Sim/Não in forms.
            field=models.NullBooleanField(choices=[(True, 'Sim'), (False, 'Não')], verbose_name='Sessão interativa'),
        ),
    ]
|
# Copyright (c) 2017 https://github.com/ping
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
# Package version string (PEP 440); bump on release.
__version__ = '0.3.9'
|
]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
"agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    agent_pool_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP DELETE request that starts deletion of an agent pool.

    NOTE: AutoRest-generated code — regenerate from the service spec rather
    than hand-editing.
    """
    api_version = "2022-01-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="DELETE",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_upgrade_profile_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    agent_pool_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request for an agent pool's upgrade profile.

    NOTE: AutoRest-generated code — regenerate from the service spec rather
    than hand-editing.
    """
    api_version = "2022-01-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeProfiles/default')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_get_available_agent_pool_versions_request(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP GET request listing available agent pool versions.

    NOTE: AutoRest-generated code — regenerate from the service spec rather
    than hand-editing.
    """
    api_version = "2022-01-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/availableAgentPoolVersions')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="GET",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
def build_upgrade_node_image_version_request_initial(
    subscription_id: str,
    resource_group_name: str,
    resource_name: str,
    agent_pool_name: str,
    **kwargs: Any
) -> HttpRequest:
    """Build the HTTP POST request that starts a node image version upgrade.

    NOTE: AutoRest-generated code — regenerate from the service spec rather
    than hand-editing.
    """
    api_version = "2022-01-01"
    accept = "application/json"
    # Construct URL
    url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.ContainerService/managedClusters/{resourceName}/agentPools/{agentPoolName}/upgradeNodeImageVersion')
    path_format_arguments = {
        "subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
        "resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str', min_length=1),
        "resourceName": _SERIALIZER.url("resource_name", resource_name, 'str', max_length=63, min_length=1, pattern=r'^[a-zA-Z0-9]$|^[a-zA-Z0-9][-_a-zA-Z0-9]{0,61}[a-zA-Z0-9]$'),
        "agentPoolName": _SERIALIZER.url("agent_pool_name", agent_pool_name, 'str'),
    }
    url = _format_url_section(url, **path_format_arguments)
    # Construct parameters
    query_parameters = kwargs.pop("params", {})  # type: Dict[str, Any]
    query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
    # Construct headers
    header_parameters = kwargs.pop("headers", {})  # type: Dict[str, Any]
    header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
    return HttpRequest(
        method="POST",
        url=url,
        params=query_parameters,
        headers=header_parameters,
        **kwargs
    )
class AgentPoolsOperations(object):
"""AgentPoolsOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.containerservice.v2022_01_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer):
        # Wire up the shared pipeline client, configuration and
        # (de)serializers supplied by the service client.
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
@distributed_trace
def list(
self,
resource_group_name: str,
resource_name: str,
**kwargs: Any
) -> Iterable["_models.AgentPoolListResult"]:
"""Gets a list of agent pools in the specified managed cluster.
Gets a list of agent pools in |
ath")
def esc_uscores(self, string):
if string:
return string.replace("_", "\_")
else:
return
def exclude_builtins(self, classes, module):
new_classes = []
for cls in classes:
if module in cls[1].__module__:
new_classes.append(cls)
return new_classes
def write_sidebar(self, modules):
filepath = os.path.join(self.output_path, "FD_Sidebar.md")
file = open(filepath, "w")
fw = file.write
fw("# Fluid Designer\n")
fw("* [Home](Home)\n")
fw("* [Understanding the User Interface](Understanding-the-User-Interface)\n")
fw("* [Navigating the 3D Viewport](Navigating-the-3D-Viewport)\n")
fw("* [Navigating the Library Browser](Navigating-the-Library-Browser)\n")
fw("* [The Room Builder Panel](The-Room-Builder-Panel)\n")
fw("* [Hotkeys](Fluid-Designer-Hot-Keys)\n\n")
fw("# API Documentation\n")
for mod in modules:
fw("\n## mv.{}\n".format(mod[0]))
classes = self.exclude_builtins(getmembers(mod[1], predicate=isclass), mod[0])
if len(classes) > 0:
for cls in classes:
fw("* [{}()]({})\n".format(self.esc_uscores(cls[0]),
self.esc_uscores(cls[0])))
else:
fw("* [mv.{}]({})\n".format(mod[0], mod[0]))
file.close()
    def write_class_doc(self, cls):
        """Write <ClassName>.md documenting one class and its own methods.

        cls is a (name, class object) pair as produced by inspect.getmembers.
        """
        filepath = os.path.join(self.output_path, cls[0] + ".md")
        file = open(filepath, "w")
        fw = file.write
        fw("# class {}{}{}{}\n\n".format(cls[1].__module__, ".", cls[0], "():"))
        if getdoc(cls[1]):
            fw(self.esc_uscores(getdoc(cls[1])) + "\n\n")
        for func in getmembers(cls[1], predicate=isfunction):
            # Only document functions defined by this class (qualname match),
            # skipping anything inherited.
            if cls[0] in func[1].__qualname__:
                # NOTE(review): getargspec is deprecated (removed in Python
                # 3.11); getfullargspec is the drop-in replacement — confirm
                # the target interpreter version before switching.
                args = getargspec(func[1])[0]
                args_str = ', '.join(item for item in args if item != 'self')
                fw("## {}{}{}{}\n\n".format(self.esc_uscores(func[0]),
                                            "(",
                                            self.esc_uscores(args_str) if args_str else " ",
                                            ")"))
                if getdoc(func[1]):
                    fw(self.esc_uscores(getdoc(func[1])) + "\n")
                else:
                    fw("Undocumented.\n\n")
        file.close()
    def write_mod_doc(self, mod):
        """Write <module>.md documenting one module's free functions.

        mod is a (name, module object) pair as produced by inspect.getmembers.
        """
        filepath = os.path.join(self.output_path, mod[0] + ".md")
        file = open(filepath, "w")
        fw = file.write
        fw("# module {}{}:\n\n".format("mv.", mod[0]))
        if getdoc(mod[1]):
            fw(self.esc_uscores(getdoc(mod[1])) + "\n\n")
        for func in getmembers(mod[1], predicate=isfunction):
            # NOTE(review): getargspec is deprecated (removed in Python
            # 3.11); getfullargspec is the drop-in replacement — confirm
            # the target interpreter version before switching.
            args = getargspec(func[1])[0]
            args_str = ', '.join(item for item in args if item != 'self')
            fw("## {}{}{}{}\n\n".format(self.esc_uscores(func[0]),
                                    "(",
                                    self.esc_uscores(args_str if args_str else " "),
                                    ")"))
            if getdoc(func[1]):
                fw(self.esc_uscores(getdoc(func[1])) + "\n")
            else:
                fw("Undocumented.\n\n")
        file.close()
def execute(self, context):
modules = getmembers(mv, predicate=ismodule)
self.write_sidebar(modules)
for mod in modules:
classes = self.exclude_builtins(getmembers(mod[1], predicate=isclass), mod[0])
if len(classes) > 0:
for cls in classes:
self.write_class_doc(cls)
else:
self.write_mod_doc(mod)
return {'FINISHED'}
class OPS_create_content_overview_doc(bpy.types.Operato | r):
bl_idname = | "fd_api_doc.create_content_overview"
bl_label = "Create Fluid Content Overview Documentation"
INCLUDE_FILE_NAME = "doc_include.txt"
write_path = bpy.props.StringProperty(name="Write Path", default="")
elements = []
package = None
    def write_html(self):
        # Placeholder: HTML output is not implemented yet.
        pass
def read_include_file(self, path):
dirs = []
file_path = os.path.join(path, self.INCLUDE_FILE_NAME)
if os.path.exists(file_path):
file = open(os.path.join(path, self.INCLUDE_FILE_NAME), "r")
dirs_raw = list(file)
for dir in dirs_raw:
dirs.append(dir.replace("\n", ""))
return dirs
    def create_hdr(self, name, font_size):
        """Append a one-cell header table for `name` to self.elements."""
        # Header styling: black text, rule below, generous vertical padding.
        hdr_style = TableStyle([('TEXTCOLOR', (0, 0), (-1, -1), colors.black),
                                ('BOTTOMPADDING', (0, 0), (-1, -1), 15),
                                ('TOPPADDING', (0, 0), (-1, -1), 15),
                                ('FONTSIZE', (0, 0), (-1, -1), 8),
                                ('VALIGN', (0, 0), (-1, -1), 'TOP'),
                                ('ALIGN', (0, 0), (-1, 0), 'LEFT'),
                                ('LINEBELOW', (0, 0), (-1, -1), 2, colors.black),
                                ('BACKGROUND', (0, 1), (-1, -1), colors.white)])
        name_p = Paragraph(name, ParagraphStyle("Category name style", fontSize=font_size))
        hdr_tbl = Table([[name_p]], colWidths = 500, rowHeights = None, repeatRows = 1)
        hdr_tbl.setStyle(hdr_style)
        self.elements.append(hdr_tbl)
    def create_img_table(self, dir):
        """Append a table of .png thumbnails (4 per row) for `dir` to self.elements."""
        item_tbl_data = []
        item_tbl_row = []
        for i, file in enumerate(os.listdir(dir)):
            last_item = len(os.listdir(dir)) - 1
            if ".png" in file:
                img = Image(os.path.join(dir, file), inch, inch)
                img_name = file.replace(".png", "")
                # Flush a full row of 4, or flush on the final directory entry.
                # NOTE(review): the flush-on-last-entry branch only runs when
                # the final listdir entry is a .png, and it appends the row
                # BEFORE the current image is added — a trailing partial row
                # or final image can be dropped. Confirm intended behaviour.
                if len(item_tbl_row) == 4:
                    item_tbl_data.append(item_tbl_row)
                    item_tbl_row = []
                elif i == last_item:
                    item_tbl_data.append(item_tbl_row)
                i_tbl = Table([[img], [Paragraph(img_name, ParagraphStyle("item name style", wordWrap='CJK'))]])
                item_tbl_row.append(i_tbl)
        if len(item_tbl_data) > 0:
            item_tbl = Table(item_tbl_data, colWidths=125)
            self.elements.append(item_tbl)
            self.elements.append(Spacer(1, inch * 0.5))
def search_dir(self, path):
thumb_dir = False
for file in os.listdir(path):
if ".png" in file:
thumb_dir = True
if thumb_dir:
self.create_img_table(path)
for file in os.listdir(path):
if os.path.isdir(os.path.join(path, file)):
self.create_hdr(file, font_size=14)
self.search_dir(os.path.join(path, file))
def write_pdf(self, mod):
file_path = os.path.join(self.write_path if self.write_path != "" else mod.__path__[0], "doc")
file_name = mod.__package__ + ".pdf"
if not os.path.exists(file_path):
os.mkdir(file_path)
doc = SimpleDocTemplate(os.path.join(file_path, file_name),
pagesize = A4,
leftMargin = 0.25 * inch,
rightMargin = 0.25 * inch,
topMargin = 0.25 * inch,
bottomMargin = 0.25 * inch)
lib_name = mod.__package__.replace("_", " ")
self.create_hdr(lib_name, font_size=24)
print("\n", lib_name, "\n")
dirs = |
# -*- coding: utf-8 -*-
# Copyright (C) 2009, 2013-2015 Rocky Bernstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import inspect, os
# Our local modules
from trepan.processor.command import base_cmd as Mbase_cmd
class EditCommand(Mbase_cmd.DebuggerCommand):
    """**edit** *position*

Edit specified file or module.
With no argument, edits file containing most recent line listed.

See also:
---------
`list`
"""
    # Command metadata consumed by the trepan command processor.
    aliases = ('ed',)
    category = 'files'
    min_args = 0
    max_args = 1
    name = os.path.basename(__file__).split('.')[0]
    need_stack = False
    short_help = 'Edit specified file or module'
    def run(self, args):
        """Open $EDITOR (default: 'ex') on the file/line taken either from
        the current frame (no argument) or from a parsed position argument."""
        curframe = self.proc.curframe
        if len(args) == 1:
            # No position given: use the current stack frame's file and line.
            if curframe is None:
                self.errmsg('edit: no stack to pick up position from. '
                            'Use edit FILE:LINE form.')
                return
            filename = curframe.f_code.co_filename
            lineno = curframe.f_lineno
        elif len(args) == 2:
            (modfunc, filename, lineno) = self.proc.parse_position(args[1])
            # NOTE(review): 'len(args) > 2' can never be true inside this
            # 'len(args) == 2' branch, so the get_an_int() path below is
            # unreachable dead code -- confirm intent against upstream trepan.
            if inspect.ismodule(modfunc) and lineno is None and len(args) > 2:
                val = self.proc.get_an_int(args[1],
                                           'Line number expected, got %s.' %
                                           args[1])
                if val is None: return
                lineno = val
                pass
            elif lineno is None:
                self.errmsg('edit: no linenumber provided')
                return
            pass
        # Fall back to 'ex' when no $EDITOR is configured.
        editor = 'ex'
        if 'EDITOR' in os.environ:
            editor = os.environ['EDITOR']
            pass
        # NOTE(review): filename is interpolated into a shell command
        # unquoted; paths with spaces or shell metacharacters will break.
        if os.path.exists(filename):
            os.system("%s +%d %s" % (editor, lineno, filename))
        else:
            self.errmsg("edit: file %s doesn't exist" % filename)
            pass
        return
    pass
if __name__ == '__main__':
    # Ad-hoc smoke test: run the command with no arg, a FILE:LINE arg,
    # and a nonexistent file.
    from trepan import debugger as Mdebugger
    dbg = Mdebugger.Debugger()
    command = EditCommand(dbg.core.processor)
    demo_invocations = (['edit'],
                        ['edit', './edit.py:34'],
                        ['edit', './noogood.py'],
                        )
    for argv in demo_invocations:
        command.run(argv)
|
# choco/ui.py
# Copyright (C) 2006-2016 the Choco authors and contributors <see AUTHORS file>
#
# This module is part of Choco and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
import re
import os
import posixpath
from choco import errors
from choco import util
from choco.runtime import _kwargs_for_include
class UIModule(object):
    """Base class for reusable UI modules.

    A UI module binds a render context to a template (``default_template``
    unless overridden) and renders itself through the lookup's UI container.
    Subclasses must implement :meth:`render`.
    """
    default_template = ""

    def __init__(self, context, template=None):
        self.lookup = context.lookup
        self.ui_container = self.lookup.ui_container
        self.context = context
        self.template = template or self.default_template
        self.initialize()

    def initialize(self):
        """Subclass hook called at the end of __init__; default is a no-op."""
        pass

    def get(self, key, default=None):
        """get parent context local data by key"""
        return self.context.get(key, default)

    def _execute(self, *args, **kw):
        """execute the template"""
        data = self.render(*args, **kw)
        t = self.get_template()
        return t.render_ui(self.context, *args, **data)

    def get_template(self):
        """Resolve this module's template through the UI container."""
        return self.ui_container.get_template(self.template)

    def render(self, *args, **kw):
        """Entry point and logic section for custom application actions"""
        # BUG FIX: ``NotImplemented`` is a constant, not an exception class;
        # calling it raised ``TypeError``. Use ``NotImplementedError``.
        raise NotImplementedError()
class UIContainer(object):
    """Registry of UI module classes plus a resolver for their templates."""

    def __init__(self, ui_paths, uis=None):
        """Init ui container,
        param ui_paths: the ui template paths.
        param uis: the dict like object, contains the ui module classes.
        """
        self.ui_paths = [posixpath.normpath(d) for d in
                         util.to_list(ui_paths, ())
                         ]
        self.uis = uis or dict()

    def put_ui(self, ui_name, uicls):
        """Register UI module class *uicls* under *ui_name*."""
        self.uis[ui_name] = uicls

    def get_ui(self, ui_name):
        """Return the UI class registered under *ui_name*; raise if unknown."""
        uicls = self.uis.get(ui_name)
        if uicls is None:
            # BUG FIX: error message typo ("Cant's" -> "Can't").
            raise errors.UINotFoundException("Can't find ui for %s" % ui_name)
        return uicls

    def set_lookup(self, lookup):
        """Set up template lookup"""
        self.lookup = lookup

    def get_template(self, uri):
        """Return a :class:`.Template` object corresponding to the given
        ``uri``.

        .. note:: The ``relativeto`` argument is not supported here at
           the moment.
        """
        # Namespace UI templates in the lookup cache with a "ui#" prefix.
        uiuri = "ui#" + uri
        try:
            if self.lookup.filesystem_checks:
                return self.lookup.check(uiuri, self.lookup.collection[uiuri])
            else:
                return self.lookup.collection[uiuri]
        except KeyError:
            # Not cached yet: search the configured ui paths on disk.
            u = re.sub(r'^\/+', '', uri)
            for base_dir in self.ui_paths:
                # make sure the path separators are posix - os.altsep is empty
                # on POSIX and cannot be used.
                base_dir = base_dir.replace(os.path.sep, posixpath.sep)
                srcfile = posixpath.normpath(posixpath.join(base_dir, u))
                if os.path.isfile(srcfile):
                    return self.lookup.load(srcfile, uiuri)
            # BUG FIX: message typo ("Cant" -> "Can't").
            raise errors.TopLevelLookupException(
                "Can't locate ui template for uri %r" % uiuri)
|
#!/usr/bin/env python
# coding=utf-8
# Furry Text Escape 2 main script
#
# Main-menu loop (Python 2): draws ANSI-art screens (.TCR files are plain
# text), reads a numeric choice with raw_input, and dispatches to the
# episode sub-scripts via execfile. chr(27)+"[2J" / "[H" clear the terminal
# and home the cursor; tprint1/tprint2 are repaint flags ('1' = redraw).
gamevers = ('v1.0')
n = ('null')
tprint1 = ('1')
tprint2 = ('1')
# Main menu: loop until the user picks "4" (quit).
while n.strip()!="4":
    if tprint1==('1'):
        # Repaint the title banner once, then clear the flag.
        t = open('./art/title1.TCR', 'r')
        tcr_contents = t.read()
        print (chr(27) + "[2J" + chr(27) + "[H" + tcr_contents)
        t.close()
        tprint1=('0')
    print (
'''Furry Text Escape II
(c) 2015-2016 Thomas Leathers
'''
)
    print (
'''Choose number:
1: Watch Intro
2: begin game
3: Credits
4: quit'''
)
    n = raw_input('choose number from the list above:')
    print(chr(27) + "[2A")
    if n=="2":
        #episode selection submenu
        print(chr(27) + "[2J" + chr(27) + "[H")
        episodeselection = ('null')
        tprint2 = ('1')
        t = open('./art/EPSEL-BANNER.TCR', 'r')
        tcr_contents = t.read()
        print (chr(27) + "[2J" + chr(27) + "[H" + tcr_contents + '''"which way?"
''')
        # Episode menu: loop until the user picks "5" (back to main menu).
        while episodeselection.strip()!="5":
            if tprint2==('1'):
                # Repaint the episode-select banner, then clear the flag.
                print(chr(27) + "[2J" + chr(27) + "[H")
                episodeselection = ('null')
                tprint2 = ('1')
                t = open('./art/EPSEL-BANNER.TCR', 'r')
                tcr_contents = t.read()
                print (chr(27) + "[2J" + chr(27) + "[H" + tcr_contents + '''"which way?"''')
                t.close()
                tprint2 = ('0')
            print (
'''episode selection:
1: episode 1: maintenance duties (RED)
: episode 2 -coming soon- (BLUE)
: episode 3 -coming soon- (GREEN)
4: BONUS! Playable flashback to Furry Text Escape 1!
5: return to main menu.'''
)
            episodeselection = raw_input('choice:')
            print(chr(27) + "[2A")
            if episodeselection=="1":
                # Episode 1: intro, gameplay, outro sub-scripts in order.
                print(chr(27) + "[2J" + chr(27) + "[H")
                execfile("EP1-intro.py")
                execfile("EP-1.py")
                execfile("EP1-outro.py")
                print(chr(27) + "[2J" + chr(27) + "[H")
                tprint2 = ('1')
            if episodeselection=="4":
                # Bonus: playable flashback to the first game.
                print(chr(27) + "[2J" + chr(27) + "[H")
                execfile("DARKROOM.py")
                print(chr(27) + "[2J" + chr(27) + "[H")
                tprint2 = ('1')
        print(chr(27) + "[2J" + chr(27) + "[H")
        tprint1 = ('1')
    if n=="1":
        print(chr(27) + "[2J" + chr(27) + "[H")
        execfile("CINA1-OPEN.py")
        print(chr(27) + "[2J" + chr(27) + "[H")
        tprint1 = ('1')
    if n=="3":
        print(chr(27) + "[2J" + chr(27) + "[H")
        execfile("CREDITS.py")
        print(chr(27) + "[2J" + chr(27) + "[H")
        tprint1 = ('1')
t.close()
#
|
re all separated by pipe (|) characters."
_iso639_contents = load_file_in_same_dir(__file__, 'ISO-639-2_utf-8.txt')
# drop the BOM from the beginning of the file
_iso639_contents = _iso639_contents[1:]
language_matrix = [ l.strip().split('|')
for l in _iso639_contents.strip().split('\n') ]
# update information in the language matrix
language_matrix += [['mol', '', 'mo', 'Moldavian', 'moldave'],
['ass', '', '', 'Assyrian', 'assyrien']]
for lang in language_matrix:
# remove unused languages that shadow other common ones with a non-official form
if (lang[2] == 'se' or # Northern Sami shadows Swedish
lang[2] == 'br'): # Breton shadows Brazilian
lang[2] = ''
# add missing information
if lang[0] == 'und':
lang[2] = 'un'
if lang[0] == 'srp':
lang[1] = 'scc' # from OpenSubtitles
lng3 = frozenset(l[0] for l in language_matrix if l[0])
lng3term = frozenset(l[1] for l in language_matrix if l[1])
lng2 = frozenset(l[2] for l in language_matrix if l[2])
lng_en_name = frozenset(lng for l in language_matrix
for lng in l[3].lower().split('; ') if lng)
lng_fr_name = frozenset(lng for l in language_matrix
for lng in l[4].lower().split('; ') if lng)
lng_all_names = lng3 | lng3term | lng2 | lng_en_name | lng_fr_name
lng3_to_lng3term = dict((l[0], l[1]) for l in language_matrix if l[1])
lng3term_to_lng3 = dict((l[1], l[0]) for l in language_matrix if l[1])
lng3_to_lng2 = dict((l[0], l[2]) for l in language_matrix if l[2])
lng2_to_lng3 = dict((l[2], l[0]) for l in language_matrix if l[2])
# we only return the first given english name, hoping it is the most used one
lng3_to_lng_en_name = dict((l[0], l[3].split('; ')[0])
for l in language_matrix if l[3])
lng_en_name_to_lng3 = dict((en_name.lower(), l[0])
for l in language_matrix if l[3]
for en_name in l[3].split('; '))
# we only return the first given french name, hoping it is the most used one
lng3_to_lng_fr_name = dict((l[0], l[4].split('; ')[0])
for l in language_matrix if l[4])
lng_fr_name_to_lng3 = dict((fr_name.lower(), l[0])
for l in language_matrix if l[4]
for fr_name in l[4].split('; '))
# contains a list of exceptions: strings that should be parsed as a language
# but which are not in an ISO form
lng_exceptions = { 'unknown': ('und', None),
'inconnu': ('und', None),
'unk': ('und', None),
'un': ('und', None),
'gr': ('gre', None),
'greek': ('gre', None),
'esp': ('spa', None),
'español': ('spa', None),
'se': ('swe', None),
'po': ('pt', 'br'),
'pb' | : ('pt', 'br'),
'pob': ('pt', 'br'),
'br': ('pt', 'b | r'),
'brazilian': ('pt', 'br'),
'català': ('cat', None),
'cz': ('cze', None),
'ua': ('ukr', None),
'cn': ('chi', None),
'chs': ('chi', None),
'jp': ('jpn', None),
'scr': ('hrv', None)
}
def is_iso_language(language):
    """True if *language* (case-insensitive) is any known ISO-639 code or
    English/French language name."""
    normalized = language.lower()
    return normalized in lng_all_names
def is_language(language):
    """True if *language* is a known ISO name/code or a listed exception."""
    if is_iso_language(language):
        return True
    return language in lng_exceptions
def lang_set(languages, strict=False):
    """Return a set of guessit.Language created from their given string
    representation.

    if strict is True, then this will raise an exception if any language
    could not be identified.
    """
    return {Language(name, strict=strict) for name in languages}
class Language(UnicodeMixin):
"""This class represents a human language.
You can initialize it with pretty much anything, as it knows conversion
from ISO-639 2-letter and 3-letter codes, English and French names.
You can also distinguish languages for specific countries, such as
Portuguese and Brazilian Portuguese.
There are various properties on the language object that give you the
representation of the language for a specific usage, such as .alpha3
to get the ISO 3-letter code, or .opensubtitles to get the OpenSubtitles
language code.
>>> Language('fr')
Language(French)
>>> s(Language('eng').french_name)
'anglais'
>>> s(Language('pt(br)').country.english_name)
'Brazil'
>>> s(Language('Español (Latinoamérica)').country.english_name)
'Latin America'
>>> Language('Spanish (Latin America)') == Language('Español (Latinoamérica)')
True
>>> s(Language('zz', strict=False).english_name)
'Undetermined'
>>> s(Language('pt(br)').opensubtitles)
'pob'
"""
_with_country_regexp = re.compile('(.*)\((.*)\)')
_with_country_regexp2 = re.compile('(.*)-(.*)')
    def __init__(self, language, country=None, strict=False, scheme=None):
        """Parse *language* (any ISO code, English/French name, or a
        "lang(country)" / "lang-country" form) into .lang (ISO-639-2 code)
        and .country. With strict=True an unrecognized string raises
        ValueError; otherwise it falls back to 'und' (undetermined)."""
        language = u(language.strip().lower())
        # "pt(br)" or "pt-br": split off the country part and recurse.
        with_country = (Language._with_country_regexp.match(language) or
                        Language._with_country_regexp2.match(language))
        if with_country:
            self.lang = Language(with_country.group(1)).lang
            self.country = Country(with_country.group(2))
            return
        self.lang = None
        self.country = Country(country) if country else None
        # first look for scheme specific languages
        if scheme == 'opensubtitles':
            # OpenSubtitles uses 'br' for Breton and 'se' for Northern Sami.
            if language == 'br':
                self.lang = 'bre'
                return
            elif language == 'se':
                self.lang = 'sme'
                return
        elif scheme is not None:
            log.warning('Unrecognized scheme: "%s" - Proceeding with standard one' % scheme)
        # look for ISO language codes
        if len(language) == 2:
            self.lang = lng2_to_lng3.get(language)
        elif len(language) == 3:
            self.lang = (language
                         if language in lng3
                         else lng3term_to_lng3.get(language))
        else:
            self.lang = (lng_en_name_to_lng3.get(language) or
                         lng_fr_name_to_lng3.get(language))
        # general language exceptions
        if self.lang is None and language in lng_exceptions:
            lang, country = lng_exceptions[language]
            self.lang = Language(lang).alpha3
            self.country = Country(country) if country else None
        msg = 'The given string "%s" could not be identified as a language' % language
        if self.lang is None and strict:
            raise ValueError(msg)
        if self.lang is None:
            # Non-strict fallback: 'und' is ISO-639's "undetermined".
            log.debug(msg)
            self.lang = 'und'
@property
def alpha2(self):
return lng3_to_lng2[self.lang]
@property
def alpha3(self):
return self.lang
@property
def alpha3term(self):
return lng3_to_lng3term[self.lang]
@property
def english_name(self):
return lng3_to_lng_en_name[self.lang]
@property
def french_name(self):
return lng3_to_lng_fr_name[self.lang]
@property
def opensubtitles(self):
if self.lang == 'por' and self.country and self.country.alpha2 == 'br':
return 'pob'
elif self.lang in ['gre', 'srp']:
return self.alpha3term
return self.alpha3
@property
def tmdb(self):
if self.country:
return '%s-%s' % (self.alpha2, self.country.alpha2.upper())
return self.alpha2
def __hash__(self):
return hash(self.lang)
def __eq__(self, other):
if isinstance(other, Language):
return self.lang == other.lang
if isinstance(other, base_text_type):
try:
return self == Language(other)
except ValueError:
return False
return False
def __ne__(self, other):
return not self == other
def __nonzero__(self):
re |
#
# (c) 2017, Red Hat, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import re
import time
import glob
from ansible.plugins.action.ironware import ActionModule as _ActionModule
from ansible.module_utils._text import to_text
from ansible.module_utils.six.moves.urllib.parse import urlsplit
from ansible.utils.vars import merge_hash
PRIVATE_KEYS_RE = re.compile('__.+__')
class ActionModule(_ActionModule):
    """Action wrapper for the ironware config module: renders an optional
    ``src`` template, delegates to the base action, writes an optional
    per-host backup, and strips private (``__...__``) keys from the result."""

    def run(self, tmp=None, task_vars=None):
        """Execute the task; returns the (cleaned) module result dict."""
        if self._task.args.get('src'):
            try:
                self._handle_template()
            except ValueError as exc:
                # BUG FIX: ``exc.message`` does not exist on Python 3;
                # convert the exception itself to text.
                return dict(failed=True, msg=to_text(exc))
        result = super(ActionModule, self).run(tmp, task_vars)
        if self._task.args.get('backup') and result.get('__backup__'):
            # User requested backup and no error occurred in module.
            # NOTE: If there is a parameter error, _backup key may not be in results.
            filepath = self._write_backup(task_vars['inventory_hostname'],
                                          result['__backup__'])
            result['backup_path'] = filepath
        # strip out any keys that have two leading and two trailing
        # underscore characters
        # BUG FIX: iterate a snapshot of the keys -- deleting from a dict
        # while iterating its live view raises RuntimeError on Python 3.
        for key in list(result.keys()):
            if PRIVATE_KEYS_RE.match(key):
                del result[key]
        return result

    def _get_working_path(self):
        """Return the role path when running inside a role, else the
        playbook base directory."""
        cwd = self._loader.get_basedir()
        if self._task._role is not None:
            cwd = self._task._role._role_path
        return cwd

    def _write_backup(self, host, contents):
        """Write *contents* to a timestamped per-host file under ./backup,
        removing any previous backups for the same host; returns the path."""
        backup_path = self._get_working_path() + '/backup'
        if not os.path.exists(backup_path):
            os.mkdir(backup_path)
        for fn in glob.glob('%s/%s*' % (backup_path, host)):
            os.remove(fn)
        tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
        filename = '%s/%s_config.%s' % (backup_path, host, tstamp)
        # BUG FIX: close the file deterministically (handle was leaked).
        with open(filename, 'w') as f:
            f.write(contents)
        return filename

    def _handle_template(self):
        """Resolve ``src`` to a file, render it with the templar, and store
        the rendered text back into the task args.

        Raises ValueError when the source is missing or unreadable."""
        src = self._task.args.get('src')
        working_path = self._get_working_path()
        # BUG FIX: urlsplit was called on the literal string 'src' instead
        # of the variable, so URL-style sources were never detected.
        if os.path.isabs(src) or urlsplit(src).scheme:
            source = src
        else:
            source = self._loader.path_dwim_relative(working_path, 'templates', src)
            if not source:
                source = self._loader.path_dwim_relative(working_path, src)
        if not os.path.exists(source):
            raise ValueError('path specified in src not found')
        try:
            with open(source, 'r') as f:
                template_data = to_text(f.read())
        except IOError:
            # BUG FIX: this used to *return* a dict that the caller
            # discarded; raise so run() reports the failure.
            raise ValueError('unable to load src file')
        # Create a template search path in the following order:
        # [working_path, self_role_path, dependent_role_paths, dirname(source)]
        searchpath = [working_path]
        if self._task._role is not None:
            searchpath.append(self._task._role._role_path)
        # BUG FIX: hasattr was called with the typo "_block:" so dependent
        # role paths were never added to the search path.
        if hasattr(self._task, "_block"):
            dep_chain = self._task._block.get_dep_chain()
            if dep_chain is not None:
                for role in dep_chain:
                    searchpath.append(role._role_path)
        searchpath.append(os.path.dirname(source))
        self._templar.environment.loader.searchpath = searchpath
        self._task.args['src'] = self._templar.template(template_data)
|
import scipy.stats as estad
from tikon.ecs.aprioris import APrioriDist
from tikon.ecs.árb_mód import Parám
from tikon.móds.rae.orgs.ecs.repr._plntll_ec import EcuaciónReprCoh
class N(Parám):
    # Non-negative, unitless parameter 'n' of the triangular reproduction
    # equation; prior is an exponential distribution with scale 500.
    nombre = 'n'
    líms = (0, None)
    unids = None
    apriori = APrioriDist(estad.expon(scale=500))
class A(Parám):
    # Non-negative, unitless parameter 'a'; used as scipy.stats.triang's
    # 'loc' (see Triang._prms_scipy). Prior: exponential, scale 100.
    nombre = 'a'
    líms = (0, None)
    unids = None
    apriori = APrioriDist(estad.expon(scale=100))
class B(Parám):
    # Non-negative, unitless parameter 'b'; used as scipy.stats.triang's
    # 'scale' (see Triang._prms_scipy). Prior: exponential, scale 100.
    nombre = 'b'
    líms = (0, None)
    unids = None
    apriori = APrioriDist(estad.expon(scale=100))
class C(Parám):
    # Parameter 'c' bounded to [0, 1]; passed straight through as
    # scipy.stats.triang's 'c' shape parameter (see Triang._prms_scipy).
    nombre = 'c'
    líms = (0, 1)
    unids = None
class Triang(EcuaciónReprCoh):
    """Cohort reproduction equation backed by a triangular distribution."""
    nombre = 'Triang'
    cls_ramas = [N, A, B, C]
    _cls_dist = estad.triang

    def _prms_scipy(símismo):
        # Map the calibrated coefficients onto scipy.stats.triang's
        # parameters: a -> loc, b -> scale, c -> c (mode fraction).
        coefs = símismo.cf
        return {'loc': coefs['a'], 'scale': coefs['b'], 'c': coefs['c']}
|
"""
WSGI config for Courseware project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://doc | s.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Courseware.settings")
from djang | o.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
import _plotly_utils.basev | alidators
class TickvalsValidator(_plotly_utils.basevalidators.DataArrayValidator):
    """Validator for the ``mesh3d.colorbar.tickvals`` data-array property."""

    def __init__(self, plotly_name="tickvals", parent_name="mesh3d.colorbar", **kwargs):
        # Pull overridable defaults out of **kwargs before delegating.
        edit_type = kwargs.pop("edit_type", "colorbars")
        role = kwargs.pop("role", "data")
        super(TickvalsValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            **kwargs
        )
|
#!/usr/bin/python
"""Updates the timezone data held in bionic and ICU."""
import ftplib
import glob
import httplib
import os
import re
import shutil
import subprocess
import sys
import tarfile
import tempfile
regions = ['africa', 'antarctica', 'asia', 'australasia',
'etcetera', 'europe', 'northamerica', 'southamerica',
# These two deliberately come last so they override what came
# before (and each other).
'backward', 'backzone' ]
def CheckDirExists(dir, dirname):
if not os.path.isdir(dir):
print "Couldn't find %s (%s)!" % (dirname, dir)
sys.exit(1)
# Directory containing this script; anchor for locating the source trees.
bionic_libc_tools_zoneinfo_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
# Find the bionic directory, searching upward from this script.
bionic_dir = os.path.realpath('%s/../../..' % bionic_libc_tools_zoneinfo_dir)
bionic_libc_zoneinfo_dir = '%s/libc/zoneinfo' % bionic_dir
CheckDirExists(bionic_libc_zoneinfo_dir, 'bionic/libc/zoneinfo')
CheckDirExists(bionic_libc_tools_zoneinfo_dir, 'bionic/libc/tools/zoneinfo')
print 'Found bionic in %s ...' % bionic_dir
# Find the icu4c directory.
icu_dir = os.path.realpath('%s/../external/icu/icu4c/source' % bionic_dir)
CheckDirExists(icu_dir, 'external/icu/icu4c/source')
print 'Found icu in %s ...' % icu_dir
def GetCurrentTzDataVersion():
  """Return the installed tzdata version string (e.g. 'tzdata2014a').

  The bionic tzdata file begins with its NUL-terminated version string."""
  # BUG FIX: the file handle was leaked; close it deterministically.
  with open('%s/tzdata' % bionic_libc_zoneinfo_dir) as f:
    return f.read().split('\x00', 1)[0]
def WriteSetupFile():
  """Writes the list of zones that ZoneCompactor should process.

  Scans the extracted region files for 'Link' and 'Zone' records and writes
  a 'setup' file containing the sorted, de-duplicated links and zones."""
  links = []
  zones = []
  for region in regions:
    # BUG FIX: use "with" so every region file is closed promptly
    # (handles were previously leaked).
    with open('extracted/%s' % region) as region_file:
      for line in region_file:
        fields = line.split()
        if fields:
          if fields[0] == 'Link':
            links.append('%s %s %s' % (fields[0], fields[1], fields[2]))
            zones.append(fields[2])
          elif fields[0] == 'Zone':
            zones.append(fields[1])
  # (The old in-place zones.sort() was redundant: sorted(set(...)) below
  # already produces sorted output.)
  with open('setup', 'w') as setup:
    for link in sorted(set(links)):
      setup.write('%s\n' % link)
    for zone in sorted(set(zones)):
      setup.write('%s\n' % zone)
def SwitchToNewTemporaryDirectory():
  """Create a fresh '-tzdata' temporary directory and chdir into it."""
  tmp_dir = tempfile.mkdtemp('-tzdata')
  os.chdir(tmp_dir)
  print 'Created temporary directory "%s"...' % tmp_dir
def FtpRetrieveFile(ftp, filename):
  """Download *filename* from the FTP server into the current directory."""
  # BUG FIX: the output file handle was leaked; use "with" to close it.
  with open(filename, 'wb') as f:
    ftp.retrbinary('RETR %s' % filename, f.write)
def FtpRetrieveFileAndSignature(ftp, data_filename):
  """Downloads and repackages the given data from the given FTP server."""
  print 'Downloading data...'
  FtpRetrieveFile(ftp, data_filename)
  print 'Downloading signature...'
  # IANA publishes a detached GPG signature (.asc) alongside each tarball.
  signature_filename = '%s.asc' % data_filename
  FtpRetrieveFile(ftp, signature_filename)
def HttpRetrieveFile(http, path, output_filename):
  """GET *path* over the given HTTP connection, saving the response body
  to *output_filename*."""
  http.request("GET", path)
  # BUG FIX: the output file handle was closed only implicitly; use "with".
  with open(output_filename, 'wb') as f:
    f.write(http.getresponse().read())
def HttpRetrieveFileAndSignature(http, data_filename):
"""Downloads and repackages the given data from the given HTTP server."""
path = "/time-zones/repository/releases/%s" % dat | a_filename
print 'Downloading data...'
HttpRetrieveFile(http, path, data_filename)
print 'Downloading signature...'
signature_filename = '%s.asc' % data_filename
HttpRetrievefile(http, "%s.asc" % path, signature_filename)
def BuildIcuToolsAndDa | ta(data_filename):
# Keep track of the original cwd so we can go back to it at the end.
original_working_dir = os.getcwd()
# Create a directory to run 'make' from.
icu_working_dir = '%s/icu' % original_working_dir
os.mkdir(icu_working_dir)
os.chdir(icu_working_dir)
# Build the ICU tools.
print 'Configuring ICU tools...'
subprocess.check_call(['%s/runConfigureICU' % icu_dir, 'Linux'])
# Run the ICU tools.
os.chdir('tools/tzcode')
# The tz2icu tool only picks up icuregions and icuzones in they are in the CWD
for icu_data_file in [ 'icuregions', 'icuzones']:
icu_data_file_source = '%s/tools/tzcode/%s' % (icu_dir, icu_data_file)
icu_data_file_symlink = './%s' % icu_data_file
os.symlink(icu_data_file_source, icu_data_file_symlink)
shutil.copyfile('%s/%s' % (original_working_dir, data_filename), data_filename)
print 'Making ICU data...'
# The Makefile assumes the existence of the bin directory.
os.mkdir('%s/bin' % icu_working_dir)
subprocess.check_call(['make'])
# Copy the source file to its ultimate destination.
icu_txt_data_dir = '%s/data/misc' % icu_dir
print 'Copying zoneinfo64.txt to %s ...' % icu_txt_data_dir
shutil.copy('zoneinfo64.txt', icu_txt_data_dir)
# Regenerate the .dat file.
os.chdir(icu_working_dir)
subprocess.check_call(['make', '-j32'])
# Copy the .dat file to its ultimate destination.
icu_dat_data_dir = '%s/stubdata' % icu_dir
datfiles = glob.glob('data/out/tmp/icudt??l.dat')
if len(datfiles) != 1:
print 'ERROR: Unexpectedly found %d .dat files (%s). Halting.' % (len(datfiles), datfiles)
sys.exit(1)
datfile = datfiles[0]
print 'Copying %s to %s ...' % (datfile, icu_dat_data_dir)
shutil.copy(datfile, icu_dat_data_dir)
# Switch back to the original working cwd.
os.chdir(original_working_dir)
def CheckSignature(data_filename):
  """Verify the detached GPG signature of *data_filename*; gpg failure
  propagates as subprocess.CalledProcessError and aborts the update."""
  signature_filename = '%s.asc' % data_filename
  print 'Verifying signature...'
  # If this fails for you, you probably need to import Paul Eggert's public key:
  # gpg --recv-keys ED97E90E62AA7E34
  subprocess.check_call(['gpg', '--trusted-key=ED97E90E62AA7E34', '--verify',
                         signature_filename, data_filename])
def BuildBionicToolsAndData(data_filename):
  """Extract *data_filename*, compile the zones with zic(1), and repackage
  the result into bionic's zoneinfo directory via ZoneCompactor."""
  # Derive the version ('tzdataYYYYx') from the tarball name.
  new_version = re.search('(tzdata.+)\\.tar\\.gz', data_filename).group(1)
  print 'Extracting...'
  os.mkdir('extracted')
  tar = tarfile.open(data_filename, 'r')
  tar.extractall('extracted')
  print 'Calling zic(1)...'
  os.mkdir('data')
  zic_inputs = [ 'extracted/%s' % x for x in regions ]
  zic_cmd = ['zic', '-d', 'data' ]
  zic_cmd.extend(zic_inputs)
  subprocess.check_call(zic_cmd)
  # Produce the 'setup' list consumed by ZoneCompactor below.
  WriteSetupFile()
  print 'Calling ZoneCompactor to update bionic to %s...' % new_version
  subprocess.check_call(['javac', '-d', '.',
                         '%s/ZoneCompactor.java' % bionic_libc_tools_zoneinfo_dir])
  subprocess.check_call(['java', 'ZoneCompactor',
                         'setup', 'data', 'extracted/zone.tab',
                         bionic_libc_zoneinfo_dir, new_version])
# Run with no arguments from any directory, with no special setup required.
# See http://www.iana.org/time-zones/ for more about the source of this data.
def main():
print 'Looking for new tzdata...'
tzdata_filenames = []
# The FTP server lets you download intermediate releases, and also lets you
# download the signatures for verification, so it's your best choice.
use_ftp = True
if use_ftp:
ftp = ftplib.FTP('ftp.iana.org')
ftp.login()
ftp.cwd('tz/releases')
for filename in ftp.nlst():
if filename.startswith('tzdata20') and filename.endswith('.tar.gz'):
tzdata_filenames.append(filename)
tzdata_filenames.sort()
else:
http = httplib.HTTPConnection('www.iana.org')
http.request("GET", "/time-zones")
index_lines = http.getresponse().read().split('\n')
for line in index_lines:
m = re.compile('.*href="/time-zones/repository/releases/(tzdata20\d\d\c\.tar\.gz)".*').match(line)
if m:
tzdata_filenames.append(m.group(1))
# If you're several releases behind, we'll walk you through the upgrades
# one by one.
current_version = GetCurrentTzDataVersion()
current_filename = '%s.tar.gz' % current_version
for filename in tzdata_filenames:
if filename > current_filename:
print 'Found new tzdata: %s' % filename
SwitchToNewTemporaryDirectory()
if use_ftp:
FtpRetrieveFileAndSignature(ftp, filename)
else:
HttpRetrieveFileAndSignature(http, filename)
CheckSignature(filename)
BuildIcuToolsAndData(filename)
BuildBionicToolsAndData(filename)
print 'Look in %s and %s for new data files' % (bionic_dir, icu_dir)
sys.exit(0)
print 'You already have the latest tzdata (%s)!' % current_version
sys.exit(0)
# Script entry point: run the update when invoked directly.
if __name__ == '__main__':
  main()
|
def splice(alists, recycle = True):
    """
    Interleave the elements of the indexable collections in *alists*
    round-robin.  The inner collections may have different lengths.
    When *recycle* is true, shorter collections restart from their first
    element until max(len) * len(alists) items have been produced;
    otherwise each element is used exactly once.
    Any error (e.g. empty or invalid input) yields an empty list, per the
    original contract.
    """
    try:
        nlists = len(alists)
        counts = [len(a) for a in alists]
        if recycle:
            total = max(counts) * nlists
        else:
            total = sum(counts)
        cursors = [0] * nlists
        out = []
        j = 0
        while len(out) < total:
            if cursors[j] < counts[j]:
                out.append(alists[j][cursors[j]])
                cursors[j] += 1
                if recycle and cursors[j] >= counts[j]:
                    cursors[j] = 0
            j = (j + 1) % nlists
        return out
    except:  # deliberate catch-all: documented error -> [] behavior
        return []
if __name__ == "__main__":
print splice([[1,2,3], ['a','b'], [4], [-1,-2,-3,-4]], recycle = False)
print splice([[1,2,3], ['a','b'], [4], [-1,-2,-3,-4]])
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the nullable boolean 'polls' column (default True)
        to the socialplatform_facebookprofile table."""
        # Adding field 'FacebookProfile.polls'
        db.add_column(u'socialplatform_facebookprofile', 'polls',
                      self.gf('django.db.models.fields.NullBooleanField')(default=True, null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Revert: drop the 'polls' column added by forwards()."""
        # Deleting field 'FacebookProfile.polls'
        db.delete_column(u'socialplatform_facebookprofile', 'polls')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'socialplatform.dmall': {
'Meta': {'object_name': 'DMAll'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'send_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'socialplatform.dmindividual': {
'Meta': {'object_name': 'DMIndividual'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '140'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'send_ind_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'target_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['socialplatform.TwitterProfile']"})
},
u'socialplatform.facebookprofile': {
'Meta': {'object_name': 'FacebookProfile'},
'access_token': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'active': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'facebook_id': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
'helpdesk': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'issue': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'notifications': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'polls': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
'profilePicture': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'socialplatform.fbnotification': {
'Meta': {'object_name': 'FBNotification'},
'fb_profile': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['socialplatform.FacebookProfile']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sender': ('django.db.models.fields.related. | ForeignKey', [], {'to': u"orm['auth.User']"}),
'text': ('django.db.models.fields.TextField', [], {})
},
u'socialplatform.tweet': {
'Meta': {'object_name': 'Tweet'},
'content': ('django.db.models.fields.CharFi | eld', [], {'max_length': '140'}),
'created': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'tweet_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'socialplatform.twitterprofile': {
'Meta': {'object_name': 'TwitterProfile'},
'active': ('django.db.models.fields.NullBooleanField', [], {'default': 'True', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['socialplatform'] |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARR | ANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Add ``dag_code`` table
Revision ID: 952da73b5eff
Revises: 852ae6c715af
Create Date: 2020-03-12 12:39:01.797462
"""
import sqlalchemy as sa
from alembic import op
from airflow.models.dagcode import DagCode
# revision identifiers, used by Alembic.
re | vision = '952da73b5eff'
down_revision = '852ae6c715af'
branch_labels = None
depends_on = None
airflow_version = '1.10.10'
def upgrade():
"""Create DagCode Table."""
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class SerializedDagModel(Base):
__tablename__ = 'serialized_dag'
# There are other columns here, but these are the only ones we need for the SELECT/UPDATE we are doing
dag_id = sa.Column(sa.String(250), primary_key=True)
fileloc = sa.Column(sa.String(2000), nullable=False)
fileloc_hash = sa.Column(sa.BigInteger, nullable=False)
"""Apply add source code table"""
op.create_table(
'dag_code',
sa.Column('fileloc_hash', sa.BigInteger(), nullable=False, primary_key=True, autoincrement=False),
sa.Column('fileloc', sa.String(length=2000), nullable=False),
sa.Column('source_code', sa.UnicodeText(), nullable=False),
sa.Column('last_updated', sa.TIMESTAMP(timezone=True), nullable=False),
)
conn = op.get_bind()
if conn.dialect.name != 'sqlite':
if conn.dialect.name == "mssql":
op.drop_index('idx_fileloc_hash', 'serialized_dag')
op.alter_column(
table_name='serialized_dag', column_name='fileloc_hash', type_=sa.BigInteger(), nullable=False
)
if conn.dialect.name == "mssql":
op.create_index('idx_fileloc_hash', 'serialized_dag', ['fileloc_hash'])
sessionmaker = sa.orm.sessionmaker()
session = sessionmaker(bind=conn)
serialized_dags = session.query(SerializedDagModel).all()
for dag in serialized_dags:
dag.fileloc_hash = DagCode.dag_fileloc_hash(dag.fileloc)
session.merge(dag)
session.commit()
def downgrade():
"""Unapply add source code table"""
op.drop_table('dag_code')
|
# Copyright (C) 2013 Johnny Vestergaard <jkv@unixcluster.dk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import poplib
from datetime import datetime
import logging
from beeswarm.drones.client.baits.clientbase import ClientBase
logger = logging.getLogger(__name__)
class Pop3(ClientBase):
def __init__(self, options):
"""
Initializes common values.
:param options: A dict containing all options
"""
super(Pop3, self).__init__(options)
def start(self):
"""
Launches a new POP3 client session on the server.
"""
username = self.options['username']
password = self.options['password']
server_host = self.options['server']
server_port = self.options['port']
honeypot_id = self.options['honeypot_id']
session = self.create_session(server_host, server_port, honeypot_id)
try:
logger.debug(
'Sending {0} bait session to {1}:{2}. (bait id: {3})'.format('pop3', server_host, server_port,
session.id))
conn = poplib.POP3(server_host, server_port)
session.source_port = conn.sock.getsockname()[1]
banner = conn.getwelcome()
session.protocol_data['banner'] = banner
session.did_connect = True
conn.user(username)
conn.pass_(password)
# TODO: Handle failed login
session.add_auth_attempt('plaintext', True, username=username, password=password)
session.did_login = True
session.timestamp = datetime.utcnow()
# except (poplib.error_proto, h_socket.error) as err:
except Exception as err:
logger.debug('Caught exception: {0} ({1})'.format(err, str(type(err))))
else:
list_entries = conn.list()[1 | ]
for entry in list_entries:
index, octets = entry.split(' ')
conn.retr(index)
conn.dele(index)
logger.debug('Found and deleted {0} messages on {1}'.format(len(list_entries), server_host)) |
conn.quit()
session.did_complete = True
finally:
session.all_done = True
session.end_session()
if conn:
try:
conn.file.close()
except Exception:
pass
try:
conn.sock.close()
except Exception:
pass
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python utilities required by Keras."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import marshal
import os
import sys
import time
import types as python_types
import numpy as np
import six
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
_GLOBAL_CUSTOM_OBJECTS = {}
class CustomObjectScope(object):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject` (e.g. a class):
```python
with CustomObjectScope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
"""
def __init__(self, *args):
self.custom_objects = args
self.backup = None
def __enter__(self):
self.backup = _GLOBAL_CUSTOM_OBJECTS.copy()
for objects in self.custom_objects:
_GLOBAL_CUSTOM_OBJECTS.update(objects)
return self
def __exit__(self, *args, **kwargs):
_GLOBAL_CUSTOM_OBJECTS.clear()
_GLOBAL_CUSTOM_OBJECTS.update(self.backup)
def custom_object_scope(*args):
"""Provides a scope that changes to `_GLOBAL_CUSTOM_OBJECTS` cannot escape.
Convenience wrapper for `CustomObjectScope`.
Code within a `with` statement will be able to access custom objects
by name. Changes to global custom objects persist
within the enclosing `with` statement. At end of the `with` statement,
global custom objects are reverted to state
at beginning of the `with` statement.
Example:
Consider a custom object `MyObject`
```python
with custom_object_scope({'MyObject':MyObject}):
layer = Dense(..., kernel_regularizer='MyObject')
# save, load, etc. will recognize custom object by name
```
Arguments:
*args: Variable length list of dictionaries of name,
class pairs to add to custom objects.
Returns:
Object of type `CustomObjectScope`.
"""
return CustomObjectScope(*args)
def get_custom_objects():
"""Retrieves a live reference to the global dictionary of custom objects.
Updating and clearing custom objects using `custom_object_scope`
is preferred, but `get_custom_objects` can
be used to directly access `_GLOBAL_CUSTOM_OBJECTS`.
Example:
```python
get_custom_objects().clear()
get_custom_objects()['MyObject'] = MyObject
```
Returns:
Global dictionary of names to classes (`_GLOBAL_CUSTOM_OBJECTS`).
"""
return _GLOBAL_CUSTOM_OBJECTS
def serialize_keras_object(instance):
_, instance = tf_decorator.unwrap(instance)
if instance is None:
return None
if hasattr(instance, 'get_config'):
return {
'class_name': instance.__class__.__name__,
'config': instance.get_config()
}
if hasattr(instance, '__name__'):
return instance.__name__
else:
raise ValueError('Cannot serialize', instance)
def deserialize_keras_object(identifier,
module_objects=None,
custom_objects=None,
printable_module_name='object'):
if isinstance(identifier, dict):
# In this case we are dealing with a Keras config dictionary.
config = identifier
if 'class_name' not in config or 'config' not in config:
raise ValueError('Improper config format: ' + str(config))
class_name = config['class_name']
if custom_objects and class_name in custom_objects:
cls = custom_objects[class_name]
elif class_name in _GLOBAL_CUSTOM_OBJECTS:
cls = _GLOBAL_CUSTOM_OBJECTS[class_name]
else:
module_objects = module_objects or {}
cls = module_objects.get(class_name)
if cls is None:
raise ValueError('Unknown ' + printable_module_name + ': ' + class_name)
if hasattr(cls, 'from_config'):
arg_spec = tf_inspect.getargspec(cls.from_config)
custom_objects = custom_objects or {}
if 'custom_objects' in arg_spec.args:
return cls.from_config(
config['config'],
custom_objects=dict(
list(_GLOBAL_CUSTOM_OBJECTS.items()) +
list(custom_objects.items())))
with CustomObjectScope(custom_objects):
return cls.from_config(config['config'])
else:
# Then `cls` may be a function returning a class.
# in this case by convention `config` holds
# the kwargs of the function.
custom_objects = custom_objects or {}
with CustomObjectScope(custom_objects):
return cls(**config['config'])
elif isinstance(identifier, six.string_types):
function_name = identifier
if custom_objects and function_name in custom_objects:
fn = custom_objects.get(function_name)
elif function_name in _GLOBAL_CUSTOM_OBJECTS:
fn = _GLOBAL_CUSTOM_OBJECTS[function_name]
else:
fn = module_objects.get(function_name)
if fn is None:
raise ValueError('Unknown ' + printable_module_nam | e + ':' +
| function_name)
return fn
else:
raise ValueError('Could not interpret serialized ' + printable_module_name +
': ' + identifier)
def func_dump(func):
"""Serializes a user defined function.
Arguments:
func: the function to serialize.
Returns:
A tuple `(code, defaults, closure)`.
"""
if os.name == 'nt':
code = marshal.dumps(
func.__code__).replace(b'\\', b'/').decode('raw_unicode_escape')
else:
code = marshal.dumps(func.__code__).decode('raw_unicode_escape')
defaults = func.__defaults__
if func.__closure__:
closure = tuple(c.cell_contents for c in func.__closure__)
else:
closure = None
return code, defaults, closure
def func_load(code, defaults=None, closure=None, globs=None):
"""Deserializes a user defined function.
Arguments:
code: bytecode of the function.
defaults: defaults of the function.
closure: closure of the function.
globs: dictionary of global objects.
Returns:
A function object.
"""
if isinstance(code, (tuple, list)): # unpack previous dump
code, defaults, closure = code
if isinstance(defaults, list):
defaults = tuple(defaults)
code = marshal.loads(code.encode('raw_unicode_escape'))
if globs is None:
globs = globals()
return python_types.FunctionType(
code, globs, name=code.co_name, argdefs=defaults, closure=closure)
def has_arg(fn, name, accept_all=False):
"""Checks if a callable accepts a given keyword argument.
Arguments:
fn: Callable to inspect.
name: Check if `fn` can be called with `name` as a keyword argument.
accept_all: What to return if there is no parameter called `name`
but the function accepts a `**kwargs` argument.
Returns:
bool, whether `fn` accepts a `name` keyword argument.
"""
arg_spec = tf_inspect.getargspec(fn)
if accept_all and arg_spec.keywords is not None:
return True
return name in arg_spec.args
class Progbar(object):
"""Displays a progress bar.
Arguments:
target: Total number of steps expected, None if unknown.
interval: Minimum visual progress update interval (in seconds).
"""
def __init__(self, target, width=30, verbose=1, interval=0.05):
self.width = width
if target is None:
target = -1
self.target = target
self.sum_values = {}
self.unique_values = []
self.start = time.time()
self.last_update = 0
self.interval = interval
self.total_width = 0
self.seen_so_far = 0
self.verbose = verbose
self._dynamic_display = ((hasattr(sys.stdout, 'isatty') an |
.conference.endpoint,
)
def make_context(self, **kwargs):
data = {
'attachment-count': '1',
'attachment-1': (self.attachment, 'attachment-1'),
'X-Mailgun-Sscore': 0,
'recipient': self.recipient,
| 'stripped-text': self.body,
}
data.update(kwargs.pop('data', {}))
retu | rn super(TestProvisionNode, self).make_context(data=data, **kwargs)
def test_provision(self):
with self.make_context():
msg = message.ConferenceMessage()
utils.provision_node(self.conference, msg, self.node, self.user)
assert_true(self.node.is_public)
assert_in(self.conference.admins.first(), self.node.contributors)
assert_in('emailed', self.node.system_tags)
assert_in(self.conference.endpoint, self.node.system_tags)
assert_true(self.node.tags.filter(name=self.conference.endpoint).exists())
assert_not_in('spam', self.node.system_tags)
def test_provision_private(self):
self.conference.public_projects = False
self.conference.save()
with self.make_context():
msg = message.ConferenceMessage()
utils.provision_node(self.conference, msg, self.node, self.user)
assert_false(self.node.is_public)
assert_in(self.conference.admins.first(), self.node.contributors)
assert_in('emailed', self.node.system_tags)
assert_not_in('spam', self.node.system_tags)
def test_provision_spam(self):
with self.make_context(data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1}):
msg = message.ConferenceMessage()
utils.provision_node(self.conference, msg, self.node, self.user)
assert_false(self.node.is_public)
assert_in(self.conference.admins.first(), self.node.contributors)
assert_in('emailed', self.node.system_tags)
assert_in('spam', self.node.system_tags)
@mock.patch('website.util.waterbutler_url_for')
@mock.patch('website.conferences.utils.requests.put')
def test_upload(self, mock_put, mock_get_url):
mock_get_url.return_value = 'http://queen.com/'
self.attachment.filename = 'hammer-to-fall'
self.attachment.content_type = 'application/json'
utils.upload_attachment(self.user, self.node, self.attachment)
mock_get_url.assert_called_with(
'upload',
'osfstorage',
'/' + self.attachment.filename,
self.node,
_internal=True,
user=self.user,
)
mock_put.assert_called_with(
mock_get_url.return_value,
data=self.content,
)
@mock.patch('website.util.waterbutler_url_for')
@mock.patch('website.conferences.utils.requests.put')
def test_upload_no_file_name(self, mock_put, mock_get_url):
mock_get_url.return_value = 'http://queen.com/'
self.attachment.filename = ''
self.attachment.content_type = 'application/json'
utils.upload_attachment(self.user, self.node, self.attachment)
mock_get_url.assert_called_with(
'upload',
'osfstorage',
'/' + settings.MISSING_FILE_NAME,
self.node,
_internal=True,
user=self.user,
)
mock_put.assert_called_with(
mock_get_url.return_value,
data=self.content,
)
class TestMessage(ContextTestCase):
PUSH_CONTEXT = False
def test_verify_signature_valid(self):
with self.make_context():
msg = message.ConferenceMessage()
msg.verify_signature()
def test_verify_signature_invalid(self):
with self.make_context(data={'signature': 'fake'}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
with assert_raises(message.ConferenceError):
msg.verify_signature()
def test_is_spam_false_missing_headers(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1},
)
with ctx:
msg = message.ConferenceMessage()
assert not msg.is_spam
def test_is_spam_false_all_headers(self):
ctx = self.make_context(
method='POST',
data={
'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE - 1,
'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0],
'X-Mailgun-Spf': message.SPF_PASS_VALUES[0],
},
)
with ctx:
msg = message.ConferenceMessage()
assert not msg.is_spam
def test_is_spam_true_sscore(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Sscore': message.SSCORE_MAX_VALUE + 1},
)
with ctx:
msg = message.ConferenceMessage()
assert msg.is_spam
def test_is_spam_true_dkim(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Dkim-Check-Result': message.DKIM_PASS_VALUES[0][::-1]},
)
with ctx:
msg = message.ConferenceMessage()
assert msg.is_spam
def test_is_spam_true_spf(self):
ctx = self.make_context(
method='POST',
data={'X-Mailgun-Spf': message.SPF_PASS_VALUES[0][::-1]},
)
with ctx:
msg = message.ConferenceMessage()
assert msg.is_spam
def test_subject(self):
ctx = self.make_context(
method='POST',
data={'subject': 'RE: Hip Hopera'},
)
with ctx:
msg = message.ConferenceMessage()
assert_equal(msg.subject, 'Hip Hopera')
def test_recipient(self):
address = 'test-conference@osf.io'
ctx = self.make_context(
method='POST',
data={'recipient': address},
)
with ctx:
msg = message.ConferenceMessage()
assert_equal(msg.recipient, address)
def test_text(self):
text = 'welcome to my nuclear family'
ctx = self.make_context(
method='POST',
data={'stripped-text': text},
)
with ctx:
msg = message.ConferenceMessage()
assert_equal(msg.text, text)
def test_sender_name(self):
names = [
(' Fred', 'Fred'),
(u'Me䬟', u'Me䬟'),
(u'fred@queen.com', u'fred@queen.com'),
(u'Fred <fred@queen.com>', u'Fred'),
(u'"Fred" <fred@queen.com>', u'Fred'),
]
for name in names:
with self.make_context(data={'from': name[0]}):
msg = message.ConferenceMessage()
assert_equal(msg.sender_name, name[1])
def test_sender_email(self):
emails = [
(u'fred@queen.com', u'fred@queen.com'),
(u'FRED@queen.com', u'fred@queen.com')
]
for email in emails:
with self.make_context(data={'from': email[0]}):
msg = message.ConferenceMessage()
assert_equal(msg.sender_email, email[1])
def test_route_invalid_pattern(self):
with self.make_context(data={'recipient': 'spam@osf.io'}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
with assert_raises(message.ConferenceError):
msg.route
def test_route_invalid_test(self):
recipient = '{0}conf-talk@osf.io'.format('' if settings.DEV_MODE else 'stage-')
with self.make_context(data={'recipient': recipient}):
self.app.app.preprocess_request()
msg = message.ConferenceMessage()
with assert_raises(message.ConferenceError):
msg.route
def test_route_valid_alternate(self):
conf = ConferenceFactory(endpoint='chocolate', active=True)
conf.name = 'Chocolate Conference'
conf.field_names['submission2'] = 'data'
conf.save()
recipient = '{0}chocolate-data@osf.io'.format('test-' if settings.DEV_MODE else '')
|
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import signal
import subprocess
import sys
import tempfile
from profile_chrome import controllers
from profile_chrome import ui
from pylib import android_commands
from pylib import constants
from pylib.perf import perf_control
sys.path.append(os.path.join(constants.DIR_SOURCE_ROOT,
'tools',
'telemetry'))
try:
# pylint: disable=F0401
from telemetry.core.platform.profiler import android_profiling_helper
from telemetry.util import support_binaries
except ImportError:
android_profiling_helper = None
support_binaries = None
_PERF_OPTIONS = [
# Sample across all processes and CPUs to so that the current CPU gets
# recorded to each sample.
'--all-cpus',
# In perf 3.13 --call-graph requires an argument, so use the -g short-hand
# which does not.
'-g',
# Increase priority to avoid dropping samples. Requires root.
'--realtime', '80',
# Record raw samples to get CPU information.
'--raw-samples',
# Increase sampling frequency for better coverage.
'--freq', '2000',
]
class _PerfProfiler(object):
def __init__(self, device, perf_binary, categories):
self._device = device
self._output_file = android_commands.DeviceTempFile(
self._device.old_interface, prefix='perf_output')
self._log_file = tempfile.TemporaryFile()
# TODO(jbudorick) Look at providing a way to unhandroll this once the
# adb rewrite has fully landed.
device_param = (['-s', str(self._device)] if str(self._device) else [])
cmd = ['adb'] + device_param + \
['shell', perf_binary, 'record',
'--output', self._output_file.name] + _PERF_OPTIONS
if categories:
cmd += ['--event', ','.join(categories)]
self._perf_control = perf_control.PerfControl(self._device)
self._perf_control.SetPerfProfilingMode()
self._perf_process = subprocess.Popen(cmd,
stdout=self._log_file,
stderr=subprocess.STDOUT)
def SignalAndWait(self):
self._device.KillAll('perf', sig | num=signal.SIGINT)
self._perf_process.wait()
self._perf_control.SetDefaultPerfMode()
def _FailWithLog(self, msg):
self._log_file.seek(0)
log = self._log_file.read()
raise RuntimeError('%s. Log output:\n%s' % (msg, log))
def PullResult(self, output_path):
if not self._device.FileExists(self._output_file.name):
self._FailWithLog('Perf recorded no data')
perf_profile = os.path.join(output_path,
os.path.basename(self._outp | ut_file.name))
self._device.PullFile(self._output_file.name, perf_profile)
if not os.stat(perf_profile).st_size:
os.remove(perf_profile)
self._FailWithLog('Perf recorded a zero-sized file')
self._log_file.close()
self._output_file.close()
return perf_profile
class PerfProfilerController(controllers.BaseController):
def __init__(self, device, categories):
controllers.BaseController.__init__(self)
self._device = device
self._categories = categories
self._perf_binary = self._PrepareDevice(device)
self._perf_instance = None
def __repr__(self):
return 'perf profile'
@staticmethod
def IsSupported():
return bool(android_profiling_helper)
@staticmethod
def _PrepareDevice(device):
if not 'BUILDTYPE' in os.environ:
os.environ['BUILDTYPE'] = 'Release'
return android_profiling_helper.PrepareDeviceForPerf(device)
@classmethod
def GetCategories(cls, device):
perf_binary = cls._PrepareDevice(device)
return device.RunShellCommand('%s list' % perf_binary)
def StartTracing(self, _):
self._perf_instance = _PerfProfiler(self._device,
self._perf_binary,
self._categories)
def StopTracing(self):
if not self._perf_instance:
return
self._perf_instance.SignalAndWait()
@staticmethod
def _GetInteractivePerfCommand(perfhost_path, perf_profile, symfs_dir,
required_libs, kallsyms):
cmd = '%s report -n -i %s --symfs %s --kallsyms %s' % (
os.path.relpath(perfhost_path, '.'), perf_profile, symfs_dir, kallsyms)
for lib in required_libs:
lib = os.path.join(symfs_dir, lib[1:])
if not os.path.exists(lib):
continue
objdump_path = android_profiling_helper.GetToolchainBinaryPath(
lib, 'objdump')
if objdump_path:
cmd += ' --objdump %s' % os.path.relpath(objdump_path, '.')
break
return cmd
def PullTrace(self):
symfs_dir = os.path.join(tempfile.gettempdir(),
os.path.expandvars('$USER-perf-symfs'))
if not os.path.exists(symfs_dir):
os.makedirs(symfs_dir)
required_libs = set()
# Download the recorded perf profile.
perf_profile = self._perf_instance.PullResult(symfs_dir)
required_libs = \
android_profiling_helper.GetRequiredLibrariesForPerfProfile(
perf_profile)
if not required_libs:
logging.warning('No libraries required by perf trace. Most likely there '
'are no samples in the trace.')
# Build a symfs with all the necessary libraries.
kallsyms = android_profiling_helper.CreateSymFs(self._device,
symfs_dir,
required_libs,
use_symlinks=False)
perfhost_path = support_binaries.FindPath(
android_profiling_helper.GetPerfhostName(), 'x86_64', 'linux')
ui.PrintMessage('\nNote: to view the profile in perf, run:')
ui.PrintMessage(' ' + self._GetInteractivePerfCommand(perfhost_path,
perf_profile, symfs_dir, required_libs, kallsyms))
# Convert the perf profile into JSON.
perf_script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
'third_party', 'perf_to_tracing.py')
json_file_name = os.path.basename(perf_profile)
with open(os.devnull, 'w') as dev_null, \
open(json_file_name, 'w') as json_file:
cmd = [perfhost_path, 'script', '-s', perf_script_path, '-i',
perf_profile, '--symfs', symfs_dir, '--kallsyms', kallsyms]
if subprocess.call(cmd, stdout=json_file, stderr=dev_null):
logging.warning('Perf data to JSON conversion failed. The result will '
'not contain any perf samples. You can still view the '
'perf data manually as shown above.')
return None
return json_file_name
|
# encoding: utf-8
import os
import re
import shutil
import subprocess
import tempfile
import textwrap
import time
from test.constant import (ARR_D, ARR_L, ARR_R, ARR_U, BS, ESC, PYTHON3,
SEQUENCES)
def wait_until_file_exists(file_path, times=None, interval=0.01):
while times is None or times:
if os.path.exists(file_path):
return True
time.sleep(interval)
if times is not None:
times -= 1
return False
def read_text_file(filename):
"""Reads the contens of a text file."""
if PYTHON3:
return open(filename, 'r', encoding='utf-8').read()
else:
return open(filename, 'r').read()
def is_process_running(pid):
"""Returns true if a process with pid is running, false otherwise."""
# from
# http://stackoverflow.com/questions/568271/how-to-check-if-there-exists-a-process-with-a-given-pid
try:
os.kill(pid, 0)
except OSError:
return False
else:
return True
def silent_call(cmd):
"""Calls 'cmd' and returns the exit value."""
return subprocess.call(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
def create_directory(dirname):
"""Creates 'dirname' and its parents if it does not exist."""
try:
os.makedirs(dirname)
except OSError:
pass
class TempFileManager(object):
def __init__(self, name=''):
self._temp_dir = tempfile.mkdtemp(prefix='UltiSnipsTest_' + name)
def name_temp(self, file_path):
return os.path.join(self._temp_dir, file_path)
def write_temp(self, file_path, content):
abs_path = self.name_temp(file_path)
create_directory(os.path.dirname(abs_path))
if PYTHON3:
with open(abs_path, 'w', encoding='utf-8') as f:
f.write(content)
else:
with open(abs_path, 'w') as f:
f.write(content)
return abs_path
def unique_name_temp(self, suffix='', prefix=''):
file_handler, abspath = tempfile.mkstemp(
suffix, prefix, self._temp_dir)
os.close(file_handler)
os.remove(abspath)
return abspath
def clear_temp(self):
shutil.rmtree(self._temp_dir)
create_directory(self._temp_dir)
class VimInterface(TempFileManager):
def __init__(self, vim_executable, name):
TempFileManager.__init__(self, name)
self._vim_executable = vim_executable
def get_buffer_data(self):
buffer_path = self.unique_name_temp(prefix='buffer_')
self.send(ESC + ':w! %s\n' % buffer_path)
if wait_until_file_exists(buffer_path, 50):
return read_text_file(buffer_path)[:-1]
def send(self, s):
raise NotImplementedError()
def launch(self, config=[]):
pid_file = self.name_temp('vim.pid')
done_file = self.name_temp('loading_done')
if os.path.exists(done_file):
os.remove(done_file)
post_config = []
post_config.append('%s << EOF' % ('py3' if PYTHON3 else 'py'))
post_config.append('import vim')
post_config.append(
"with open('%s', 'w') as pid_file: pid_file.write(vim.eval('getpid()'))" %
pid_file)
post_config.append(
"with open('%s', 'w') as done_file: pass" %
done_file)
post_config.append('EOF')
config_path = self.write_temp('vim_config.vim',
textwrap.dedent(os.linesep.join(config + post_config) + '\n'))
# Note the space to exclude it from shell history.
self.send(""" %s -u %s\r\n""" % (self._vim_executable, config_path))
wait_until_file_exists(done_file)
self._vim_pid = int(open(pid_file, 'r').read())
def leave_with_wait(self):
self.send(3 * ESC + ':qa!\n')
while is_process_running(self._vim_pid):
| time.sleep(.05)
class VimInterfaceTmux(VimInterface):
def __init__(self, vim_executable, session): |
VimInterface.__init__(self, vim_executable, 'Tmux')
self.session = session
self._check_version()
def send(self, s):
# I did not find any documentation on what needs escaping when sending
# to tmux, but it seems like this is all that is needed for now.
s = s.replace(';', r'\;')
if PYTHON3:
s = s.encode('utf-8')
silent_call(['tmux', 'send-keys', '-t', self.session, '-l', s])
def _check_version(self):
stdout, _ = subprocess.Popen(['tmux', '-V'],
stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
if PYTHON3:
stdout = stdout.decode('utf-8')
m = re.match(r"tmux (\d+).(\d+)", stdout)
if not m or not (int(m.group(1)), int(m.group(2))) >= (1, 8):
raise RuntimeError(
'Need at least tmux 1.8, you have %s.' %
stdout.strip())
class VimInterfaceWindows(VimInterface):
BRACES = re.compile('([}{])')
WIN_ESCAPES = ['+', '^', '%', '~', '[', ']', '<', '>', '(', ')']
WIN_REPLACES = [
(BS, '{BS}'),
(ARR_L, '{LEFT}'),
(ARR_R, '{RIGHT}'),
(ARR_U, '{UP}'),
(ARR_D, '{DOWN}'),
('\t', '{TAB}'),
('\n', '~'),
(ESC, '{ESC}'),
# On my system ` waits for a second keystroke, so `+SPACE = "`". On
# most systems, `+Space = "` ". I work around this, by sending the host
# ` as `+_+BS. Awkward, but the only way I found to get this working.
('`', '`_{BS}'),
('´', '´_{BS}'),
('{^}', '{^}_{BS}'),
]
def __init__(self):
# import windows specific modules
import win32com.client
import win32gui
self.win32gui = win32gui
self.shell = win32com.client.Dispatch('WScript.Shell')
def is_focused(self, title=None):
cur_title = self.win32gui.GetWindowText(
self.win32gui.GetForegroundWindow())
if (title or '- GVIM') in cur_title:
return True
return False
def focus(self, title=None):
if not self.shell.AppActivate(title or '- GVIM'):
raise Exception('Failed to switch to GVim window')
time.sleep(1)
def convert_keys(self, keys):
keys = self.BRACES.sub(r"{\1}", keys)
for k in self.WIN_ESCAPES:
keys = keys.replace(k, '{%s}' % k)
for f, r in self.WIN_REPLACES:
keys = keys.replace(f, r)
return keys
def send(self, keys):
keys = self.convert_keys(keys)
if not self.is_focused():
time.sleep(2)
self.focus()
if not self.is_focused():
# This is the only way I can find to stop test execution
raise KeyboardInterrupt('Failed to focus GVIM')
self.shell.SendKeys(keys)
|
self.reminderUnitBox = QtGui.QComboBox(self.basicReminderWidget)
self.reminderUnitBox.setEnabled(False)
self.reminderUnitBox.setMinimumSize(QtCore.QSize(110, 0))
self.reminderUnitBox.setObjectName("reminderUnitBox")
self.horizontalLayout.addWidget(self.reminderUnitBox)
self.reminderStack.addWidget(self.basicReminderWidget)
self.advancedReminderWidget = QtGui.QWidget()
self.advancedReminderWidget.setObjectName("advancedReminderWidget")
self.horizontalLayout_2 = QtGui.QHBoxLayout(self.advancedReminderWidget)
self.horizontalLayout_2.setSpacing(0)
self.horizontalLayout_2.setContentsMargins(0, 0, 5, 0)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.reminderDateTime = QtGui.QDateTimeEdit(self.advancedReminderWidget)
self.reminderDateTime.setEnabled(False)
self.reminderDateTime.setObjectName("reminderDateTime")
self.horizontalLayout_2.addWidget(self.reminderDateTime)
self.reminderStack.addWidget(self.advancedReminderWidget)
self.horizontalLayout_3.addWidget(self.reminderStack)
self.reminderAdvancedButton = QtGui.QPushButton(CalendarEntryEdit)
self.reminderAdvancedButton.setEnabled(False)
self.reminderAdvancedButton.setCheckable(True)
self.reminderAdvancedButton.setObjectName("reminderAdvancedButton")
self.horizontalLayout_3.addWidget(self.reminderAdvancedButton)
spacerItem = QtGui.QSpacerItem(40, 20, QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Minimum)
self.horizontalLayout_3.addItem(spacerItem)
self.formLayout.setLayout(5, QtGui.QFormLayout.FieldRole, self.horizontalLayout_3)
self.label_7 = QtGui.QLabel(CalendarEntryEdit)
self.label_7.setObjectName("label_7")
self.formLayout.setWidget(6, QtGui.QFormLayout.LabelRole, self.label_7)
self.priorityBox = QtGui.QComboBox(CalendarEntryEdit)
self.priorityBox.setMinimumSize(QtCore.QSize(150, 0))
self.priorityBox.setObjectName("priorityBox")
self.formLayout.setWidget(6, QtGui.QFormLayout.FieldRole, self.priorityBox)
self.label_8 = QtGui.QLabel(CalendarEntryEdit)
self.label_8.setObjectName("label_8")
self.formLayout.setWidget(7, QtGui.QFormLayout.LabelRole, self.label_8)
self.accessBox = QtGui.QComboBox(CalendarEntryEdit)
self.accessBox.setMinimumSize(QtCore.QSize(150, 0))
self.accessBox.setObjectName("accessBox")
self.formLayout.setWidget(7, QtGui.QFormLayout.FieldRole, self.accessBox)
self.verticalLayout.addLayout(self.formLayout)
self.buttonBox = QtGui.QDialogButtonBox(CalendarEntryEdit)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Discard|QtGui.QDialogButtonBox.Reset|QtGui.QDialogButtonBox.Save)
self.buttonBox.setObjectName("buttonBox")
self.verticalLayout.addWidget(self.buttonBox)
self.label.setBuddy(self.titleLine)
self.label_2.setBuddy(self.locationLine)
self.label_3.setBuddy(self.startDate)
self.label_4.setBuddy(self.endDate)
self.recurrenceLabel.setBuddy(self.recurrenceButton)
self.label_6.setBuddy(self.reminderCheckBox)
self.label_7.setBuddy(self.priorityBox)
self.labe | l_8.setBuddy(self.accessBox)
self.retranslateUi(CalendarEntryEdit)
self.reminderStack.setCurrentIndex(0)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("accepted()"), CalendarEntryEdit.accept)
| QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL("rejected()"), CalendarEntryEdit.reject)
QtCore.QObject.connect(self.reminderCheckBox, QtCore.SIGNAL("toggled(bool)"), self.reminderTimeBox.setEnabled)
QtCore.QObject.connect(self.reminderCheckBox, QtCore.SIGNAL("toggled(bool)"), self.reminderUnitBox.setEnabled)
QtCore.QObject.connect(self.reminderCheckBox, QtCore.SIGNAL("toggled(bool)"), self.reminderAdvancedButton.setEnabled)
QtCore.QObject.connect(self.titleLine, QtCore.SIGNAL("textEdited(QString)"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.locationLine, QtCore.SIGNAL("textEdited(QString)"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.startDate, QtCore.SIGNAL("dateTimeChanged(QDateTime)"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.startTime, QtCore.SIGNAL("dateTimeChanged(QDateTime)"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.endDate, QtCore.SIGNAL("dateTimeChanged(QDateTime)"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.endTime, QtCore.SIGNAL("dateTimeChanged(QDateTime)"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.reminderCheckBox, QtCore.SIGNAL("clicked()"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.priorityBox, QtCore.SIGNAL("currentIndexChanged(int)"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.accessBox, QtCore.SIGNAL("currentIndexChanged(int)"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.reminderTimeBox, QtCore.SIGNAL("editingFinished()"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.reminderUnitBox, QtCore.SIGNAL("currentIndexChanged(int)"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.reminderDateTime, QtCore.SIGNAL("dateTimeChanged(QDateTime)"), CalendarEntryEdit.entryChanged)
QtCore.QObject.connect(self.reminderCheckBox, QtCore.SIGNAL("toggled(bool)"), self.reminderDateTime.setEnabled)
QtCore.QObject.connect(self.reminderAdvancedButton, QtCore.SIGNAL("toggled(bool)"), CalendarEntryEdit.setAdvancedReminder)
QtCore.QMetaObject.connectSlotsByName(CalendarEntryEdit)
CalendarEntryEdit.setTabOrder(self.titleLine, self.locationLine)
CalendarEntryEdit.setTabOrder(self.locationLine, self.startDate)
CalendarEntryEdit.setTabOrder(self.startDate, self.startTime)
CalendarEntryEdit.setTabOrder(self.startTime, self.endDate)
CalendarEntryEdit.setTabOrder(self.endDate, self.endTime)
CalendarEntryEdit.setTabOrder(self.endTime, self.recurrenceButton)
CalendarEntryEdit.setTabOrder(self.recurrenceButton, self.reminderCheckBox)
CalendarEntryEdit.setTabOrder(self.reminderCheckBox, self.reminderTimeBox)
CalendarEntryEdit.setTabOrder(self.reminderTimeBox, self.reminderUnitBox)
CalendarEntryEdit.setTabOrder(self.reminderUnitBox, self.reminderAdvancedButton)
CalendarEntryEdit.setTabOrder(self.reminderAdvancedButton, self.priorityBox)
CalendarEntryEdit.setTabOrder(self.priorityBox, self.accessBox)
CalendarEntryEdit.setTabOrder(self.accessBox, self.buttonBox)
def retranslateUi(self, CalendarEntryEdit):
self.label.setText(QtGui.QApplication.translate("CalendarEntryEdit", "Title:", None, QtGui.QApplication.UnicodeUTF8))
self.label_2.setText(QtGui.QApplication.translate("CalendarEntryEdit", "Location:", None, QtGui.QApplication.UnicodeUTF8))
self.label_3.setText(QtGui.QApplication.translate("CalendarEntryEdit", "Start:", None, QtGui.QApplication.UnicodeUTF8))
self.label_4.setText(QtGui.QApplication.translate("CalendarEntryEdit", "End:", None, QtGui.QApplication.UnicodeUTF8))
self.recurrenceLabel.setText(QtGui.QApplication.translate("CalendarEntryEdit", "No recurrence", None, QtGui.QApplication.UnicodeUTF8))
self.recurrenceButton.setText(QtGui.QApplication.translate("CalendarEntryEdit", "Edit...", None, QtGui.QApplication.UnicodeUTF8))
self.label_6.setText(QtGui.QApplication.translate("CalendarEntryEdit", "Reminder:", None, QtGui.QApplication.UnicodeUTF8))
self.reminderCheckBox.setText(QtGui.QApplication.translate("CalendarEntryEdit", "Reminder:", None, QtGui.QApplication.UnicodeUTF8))
self.reminderAdvancedButton.setText(QtGui.QApplication.translate("CalendarEntryEdit", "Advanced...", None, QtGui.QApplication.UnicodeUTF8))
self.label_7.setText(QtGui.QApplication.translate("CalendarEntryEdit", " |
# Copyright 2019 Pants project c | ontributors (see CONTRIBUTORS.md).
# Licensed under the Apach | e License, Version 2.0 (see LICENSE).
"""Create AWS Lambdas from Python code.
See https://www.pantsbuild.org/docs/awslambda-python.
"""
from pants.backend.awslambda.python import rules as python_rules
from pants.backend.awslambda.python.target_types import PythonAWSLambda
from pants.backend.awslambda.python.target_types import rules as target_types_rules
def rules():
    """Aggregate every rule required to activate the AWS Lambda backend."""
    return tuple(python_rules.rules()) + tuple(target_types_rules())
def target_types():
    """Expose the target types registered by this backend."""
    return [PythonAWSLambda]
|
"""
Author: Dr. John T. Hwang <hwangjt@umich.edu>
This package is distributed under New BSD license.
Full-factorial sampling.
"""
import numpy as np
from smt.sampling_methods.sampling_method import SamplingMethod
class FullFactorial(SamplingMethod):
    def _initialize(self):
        # Options specific to full-factorial sampling.
        self.options.declare(
            "weights",
            values=None,
            types=(list, np.ndarray),
            desc="relative sampling weights for each nx dimensions",
        )
        self.options.declare(
            "clip",
            default=False,
            types=bool,
            desc="round number of samples to the sampling number product of each nx dimensions (> asked nt)",
        )

    def _compute(self, nt):
        """
        Compute the requested number of sampling points.

        Arguments
        ---------
        nt : int
            Number of points requested.

        Returns
        -------
        ndarray[nt, nx]
            The sampling locations in the input space.
        """
        xlimits = self.options["xlimits"]
        ndim = xlimits.shape[0]

        # Normalize the per-dimension weights (uniform when not provided).
        if self.options["weights"] is None:
            w = np.ones(ndim) / ndim
        else:
            w = np.atleast_1d(self.options["weights"])
            w /= np.sum(w)

        # Greedily grow the per-dimension counts until their product covers
        # the requested number of points.
        counts = np.ones(ndim, int)
        while np.prod(counts) < nt:
            counts[np.argmax(w - counts / np.sum(counts))] += 1

        # Cartesian product of evenly spaced coordinates in [0, 1].
        axes = [np.linspace(0.0, 1.0, counts[kx]) for kx in range(ndim)]
        grids = np.meshgrid(*axes, indexing="ij")

        if self.options["clip"]:
            nt = np.prod(counts)

        points = np.zeros((nt, ndim))
        for kx in range(ndim):
            points[:, kx] = grids[kx].reshape(np.prod(counts))[:nt]
        return points
|
# -*- coding: utf-8 -*-
# Copyright 2009 James Hensman
# Licensed under the Gnu General Public license, see COPYING
#from numpy import matlib as ml
import numpy as np
from scipy import linalg
class PCA_EM_matrix:
    """Maximum likelihood PCA via EM, written against numpy.matlib.

    NOTE(review): the `ml` (numpy.matlib) import at the top of this file is
    commented out, so every method here raises NameError as the file stands.
    Apparently superseded by the ndarray-based PCA_EM below — confirm before
    deleting.
    """
    def __init__(self,data,target_dim):
        """Maximum likelihood PCA by the EM algorithm"""
        self.X = ml.matrix(data)
        self.N,self.d = self.X.shape
        self.q = target_dim
    def learn(self,niters):
        # Sample mean is the ML estimate of mu.
        self.mu = self.X.mean(0).reshape(self.d,1)#ML solution for mu
        self.X2 = self.X - self.mu.T
        self.xxTsum = ml.sum([x*x.T for x in self.X2])#precalculate for speed
        #initialise paramters:
        self.W = ml.randn(self.d,self.q)
        self.sigma2 = 1.2
        for i in range(niters):
            #print self.sigma2
            self.E_step()
            self.M_step()
    def E_step(self):
        # Posterior over latents uses M = W^T W + sigma2 * I.
        M = self.W.T*self.W + ml.eye(self.q)*self.sigma2
        M_inv = ml.linalg.inv(M)
        self.m_Z = (M_inv*self.W.T*self.X2.T).T
        self.S_z = M_inv*self.sigma2
    def M_step(self):
        # Second moment of the latents across all data points.
        zzT = self.m_Z.T*self.m_Z + self.N*self.S_z
        self.W = self.X2.T*self.m_Z*ml.linalg.inv(zzT)
        WTW = self.W.T*self.W
        self.sigma2 = self.xxTsum - 2*ml.multiply(self.m_Z*self.W.T,self.X2).sum() + ml.trace(zzT*WTW)
        #self.sigma2 = self.xxTsum - 2*ml.trace(self.m_Z*self.W.T*self.X2.T) + ml.trace(zzT*WTW)
        #self.sigma2 = self.xxTsum + ml.sum([- 2*z*self.W.T*x.T + ml.trace((z.T*z + self.S_z)*WTW) for z,x in zip(self.m_Z, self.X2)])
        self.sigma2 /= self.N*self.d
class PCA_EM:
    """Maximum likelihood PCA by the EM algorithm.

    Fits a linear-Gaussian latent model x ~ N(mu + W z, sigma2 I) with
    q-dimensional latents z by alternating E and M steps.
    """
    def __init__(self,data,target_dim):
        """Maximum likelihood PCA by the EM algorithm"""
        self.X = np.array(data)
        self.N,self.d = self.X.shape
        self.q = target_dim
    def learn(self,niters):
        """Run `niters` EM iterations, estimating W, sigma2 and the latents."""
        self.mu = self.X.mean(0).reshape(self.d,1)#ML solution for mu
        self.X2 = self.X - self.mu.T
        self.xxTsum = np.sum([np.dot(x,x.T) for x in self.X2])#precalculate for speed
        #initialise paramters:
        self.W = np.random.randn(self.d,self.q)
        self.sigma2 = 1.2
        for i in range(niters):
            self.E_step()
            self.M_step()
    def E_step(self):
        """Posterior moments of the latents given the current W and sigma2."""
        M = np.dot(self.W.T,self.W) + np.eye(self.q)*self.sigma2
        # BUGFIX: scipy.linalg.cholesky returns an *upper* triangular factor
        # by default, but cho_solve was told lower=1, which silently produced
        # wrong solves. cho_factor carries the matching flag with the factor.
        M_chol = linalg.cho_factor(M)
        M_inv = linalg.cho_solve(M_chol,np.eye(self.q))
        self.m_Z = linalg.cho_solve(M_chol,np.dot(self.W.T,self.X2.T)).T
        self.S_z = M_inv*self.sigma2
    def M_step(self):
        """Update W and sigma2 from the posterior latent moments."""
        zzT = np.dot(self.m_Z.T,self.m_Z) + self.N*self.S_z
        zzT_chol = linalg.cho_factor(zzT)
        self.W = linalg.cho_solve(zzT_chol,np.dot(self.m_Z.T,self.X2)).T
        WTW = np.dot(self.W.T,self.W)
        self.sigma2 = self.xxTsum - 2*np.sum(np.dot(self.m_Z,self.W.T)*self.X2) + np.trace(np.dot(zzT,WTW))
        self.sigma2 /= self.N*self.d
class PCA_EM_missing:
    """Maximum likelihood PCA by EM, tolerating missing (NaN) entries.

    Missing values are imputed from the current model at every E-step.
    """
    def __init__(self,data,target_dim):
        """Maximum likelihood PCA by the EM algorithm, allows for missing data. uses a masked array to 'hide' the elements of X that are NaN"""
        self.X = np.array(data)
        self.imask,self.jmask = np.nonzero(np.isnan(self.X))#positions that are missing.
        # FIX: `np.isnan(x)-1` relied on implicit bool->int arithmetic to
        # invert the mask; `~` states the intent directly.
        self.indices = [np.nonzero(~np.isnan(x))[0] for x in self.X] #positions that are not missing...
        self.N,self.d = self.X.shape
        self.q = target_dim
    def learn(self,niters):
        self.Xreconstruct = self.X.copy()
        self.Xreconstruct[self.imask,self.jmask] = 0
        # Per-column mean over the observed entries only.
        self.mu = np.sum(self.Xreconstruct,0)/(self.X.shape[0]-np.sum(np.isnan(self.X),0))
        self.X2 = self.X.copy()-self.mu
        self.X2reconstruct = self.X.copy() - self.mu
        #initialise paramters:
        self.W = np.random.randn(self.d,self.q)
        self.sigma2 = 1.2
        #pre-allocate self.m_Z and self.S_Z
        self.m_Z = np.zeros((self.X2.shape[0],self.q))
        self.S_Z = np.zeros((self.X2.shape[0],self.q,self.q))
        for i in range(niters):
            # BUGFIX: was a Python 2 print statement (syntax error on py3).
            print(i, self.sigma2)
            self.E_step()
            self.M_step()
        self.Xreconstruct = self.X2reconstruct + self.mu
    def E_step(self):
        """ This should handle missing data, but needs testing (TODO)"""
        Ms = np.zeros((self.X.shape[0],self.q,self.q)) #M is going to be different for (potentially) every data point
        for m,x,i,mz,sz in zip(Ms,self.X2,self.indices,self.m_Z,self.S_Z):
            W = self.W.take(i,0)# get relevant bits of W
            x2 = np.array(x).take(i) # get relevant bits of x
            m[:,:] = np.dot(W.T,W) + np.eye(self.q)*self.sigma2
            # BUGFIX: cholesky() returns an *upper* factor by default but the
            # old code passed lower=1 to cho_solve; cho_factor keeps the flag
            # consistent with the factor.
            mchol = linalg.cho_factor(m)
            minv = linalg.cho_solve(mchol,np.eye(self.q))
            mz[:] = linalg.cho_solve(mchol,np.dot(W.T,x2.reshape(i.size,1))).T
            sz[:,:] = minv*self.sigma2
        #calculate reconstructed X values
        self.X2reconstruct[self.imask,self.jmask] = np.dot(self.m_Z,self.W.T)[self.imask,self.jmask]
        self.xxTsum = np.sum(np.square(self.X2reconstruct))# can't be pre-calculated in the missing data version :(
    def M_step(self):
        """ This should handle missing data - needs testing (TODO)"""
        zzT = np.dot(self.m_Z.T,self.m_Z) + np.sum(self.S_Z,0)
        zzT_chol = linalg.cho_factor(zzT)
        self.W = linalg.cho_solve(zzT_chol,np.dot(self.m_Z.T,self.X2reconstruct)).T
        WTW = np.dot(self.W.T,self.W)
        self.sigma2 = self.xxTsum - 2*np.sum(np.dot(self.m_Z,self.W.T)*self.X2reconstruct) + np.trace(np.dot(zzT,WTW))
        self.sigma2 /= self.N*self.d
if __name__=='__main__':
    # Demo: fit PCA_EM on fully observed data and PCA_EM_missing on the same
    # data with entries knocked out, then compare the recovered models.
    q=5#latent dimensions
    d=15# observed dimensions
    N=500
    missing_pc = 100 # percentage of the data points to be 'missing'
    truesigma = .002
    niters = 300
    phases = np.random.rand(1,q)*2*np.pi
    frequencies = np.random.randn(1,q)*2
    latents = np.sin(np.linspace(0,12,N).reshape(N,1)*frequencies-phases)
    trueW = np.random.randn(d,q)
    observed = np.dot(latents,trueW.T) + np.random.randn(N,d)*truesigma
    #PCA without missing values
    a = PCA_EM(observed,q)
    a.learn(niters)
    #a missing data problem
    Nmissing = int(N*missing_pc/100)
    observed2 = observed.copy()
    missingi = np.argsort(np.random.rand(N))[:Nmissing]
    missingj = np.random.randint(0,d-q,Nmissing)#last q columns will be complete
    # BUGFIX: np.NaN was removed in NumPy 2.0; np.nan is the canonical spelling.
    observed2[missingi,missingj] = np.nan
    b = PCA_EM_missing(observed2,q)
    b.learn(niters)
    from hinton import hinton
    import pylab
    colours = np.arange(N)# to colour the dots with
    hinton(linalg.qr(trueW.T)[1].T)
    pylab.title('true transformation')
    pylab.figure()
    hinton(linalg.qr(a.W.T)[1].T)
    pylab.title('reconstructed transformation')
    pylab.figure()
    hinton(linalg.qr(b.W.T)[1].T)
    pylab.title('reconstructed transformation (missing data)')
    pylab.figure()
    pylab.subplot(3,1,1)
    pylab.plot(latents)
    pylab.title('true latents')
    pylab.subplot(3,1,2)
    pylab.plot(a.m_Z)
    pylab.title('reconstructed latents')
    pylab.subplot(3,1,3)
    pylab.plot(b.m_Z)
    pylab.title('reconstructed latents (missing data)')
    pylab.figure()
    pylab.subplot(2,1,1)
    pylab.plot(observed)
    pylab.title('Observed values')
    pylab.subplot(2,1,2)
    pylab.plot(observed2,linewidth=2,marker='.')
    pylab.plot(b.Xreconstruct)
    pylab.show()
|
from twisted.internet import error as TxErrors
import couchbase._libcouchbase as LCB
from couchbase._libcouchbase import (
Event, TimerEvent, IOEvent,
LCB_READ_EVENT, LCB_WRITE_EVENT, LCB_RW_EVENT,
PYCBC_EVSTATE_ACTIVE,
PYCBC_EVACTION_WATCH,
PYCBC_EVACTION_UNWATCH,
PYCBC_EVACTION_CLEANUP
)
class TxIOEvent(IOEvent):
    """
    Adapter exposing libcouchbase IO events to Twisted's FD interface.
    The C-implemented IOEvent base already provides 'fileno()', which is
    all Twisted needs besides the callbacks below.
    """
    __slots__ = []

    def __init__(self):
        super(TxIOEvent, self).__init__()

    def doRead(self):
        self.ready_r()

    def doWrite(self):
        self.ready_w()

    def connectionLost(self, reason):
        # Only propagate while libcouchbase still considers this event active.
        if self.state != PYCBC_EVSTATE_ACTIVE:
            return
        self.ready_w()

    def logPrefix(self):
        return "Couchbase IOEvent"
class TxTimer(TimerEvent):
    """Adapter mapping libcouchbase timers onto reactor.callLater."""
    __slots__ = ['_txev', 'lcb_active']

    def __init__(self):
        super(TxTimer, self).__init__()
        self.lcb_active = False
        self._txev = None

    def _timer_wrap(self):
        # Fire into libcouchbase only if the timer wasn't cancelled meanwhile.
        if self.lcb_active:
            self.lcb_active = False
            self.ready(0)

    def schedule(self, usecs, reactor):
        delay_secs = usecs / 1000000.0
        if self._txev and self._txev.active():
            # Reuse the pending delayed call by pushing it forward.
            self._txev.reset(delay_secs)
        else:
            self._txev = reactor.callLater(delay_secs, self._timer_wrap)
        self.lcb_active = True

    def cancel(self):
        # Soft-cancel: the delayed call may still fire, but _timer_wrap
        # will ignore it.
        self.lcb_active = False

    def cleanup(self):
        if self._txev is None:
            return
        try:
            self._txev.cancel()
        except (TxErrors.AlreadyCalled, TxErrors.AlreadyCancelled):
            pass
        self._txev = None
class v0Iops(object):
    """
    IOPS implementation to be used with Twisted's "FD" based reactors.
    """
    __slots__ = ['reactor', 'is_sync', '_stop']

    def __init__(self, reactor, is_sync=False):
        self.reactor = reactor
        self.is_sync = is_sync
        self._stop = False

    def update_event(self, event, action, flags):
        """
        Called by libcouchbase to add/remove event watchers.
        """
        if action == PYCBC_EVACTION_UNWATCH:
            # Deregister whatever the event was previously watching.
            if event.flags & LCB_READ_EVENT:
                self.reactor.removeReader(event)
            if event.flags & LCB_WRITE_EVENT:
                self.reactor.removeWriter(event)
        elif action == PYCBC_EVACTION_WATCH:
            # Register the requested directions, then deregister the rest.
            if flags & LCB_READ_EVENT:
                self.reactor.addReader(event)
            if flags & LCB_WRITE_EVENT:
                self.reactor.addWriter(event)
            if not (flags & LCB_READ_EVENT):
                self.reactor.removeReader(event)
            if not (flags & LCB_WRITE_EVENT):
                self.reactor.removeWriter(event)

    def update_timer(self, timer, action, usecs):
        """
        Called by libcouchbase to add/remove timers.
        """
        if action == PYCBC_EVACTION_WATCH:
            timer.schedule(usecs, self.reactor)
        elif action == PYCBC_EVACTION_UNWATCH:
            timer.cancel()
        elif action == PYCBC_EVACTION_CLEANUP:
            timer.cleanup()

    def io_event_factory(self):
        return TxIOEvent()

    def timer_event_factory(self):
        return TxTimer()

    def start_watching(self):
        """
        Start/Stop operations. A no-op for async use because Twisted's
        reactor is a continuously running loop; in synchronous mode we spin
        the reactor manually until stop_watching() is called.
        """
        if not self.is_sync:
            return
        self._stop = False
        while not self._stop:
            self.reactor.doIteration(0)

    def stop_watching(self):
        self._stop = True
|
"""Deprecated import support. Auto-generated by import_shims/generate_shims.sh."""
# pylint: disable=redefined-built | in,wrong-import-position,wildcard-import,useless-suppression | ,line-too-long
from import_shims.warn import warn_deprecated_import
warn_deprecated_import('certificates.apps', 'lms.djangoapps.certificates.apps')
from lms.djangoapps.certificates.apps import *
|
# Copyright (c) 2021, CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/o | r sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE | SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from csv import DictReader, DictWriter
from uuid import uuid4
import logging, sys
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import Group
from promort.settings import DEFAULT_GROUPS
from predictions_manager.models import Prediction
from reviews_manager.models import PredictionReview
logger = logging.getLogger('promort_commands')
class Command(BaseCommand):
    """Management command that builds the Prediction reviews worklist."""
    help = 'build Predictions reviews worklist'

    def add_arguments(self, parser):
        parser.add_argument('--prediction-type', choices=['TUMOR', 'GLEASON'], type=str, dest='prediction_type',
                            help='the type of the Prediction objects that are going to be reviewed')
        parser.add_argument('--worklist-file', dest='worklist', type=str, default=None,
                            help='a CSV file containing the worklist, if not present reviews will be assigned randomly')
        parser.add_argument('--allow-duplicated', action='store_true', dest='allow_duplicated',
                            help='create worklist even for predictions that already have a related review')
        parser.add_argument('--report-file', dest='report_file', type=str, default=None,
                            help='a CSV file containing a report of the created prediction reviews')

    def _get_prediction_reviews_manager_users(self):
        # Members of the "prediction manager" group are the eligible reviewers.
        prev_manager_group = Group.objects.get(name=DEFAULT_GROUPS['prediction_manager']['name'])
        return prev_manager_group.user_set.all()

    def _get_predictions_list(self, prediction_type):
        return Prediction.objects.filter(type=prediction_type, review_required=True).all()

    def _check_duplicated(self, prediction, reviewer):
        """Return True if `reviewer` already has a review for `prediction`."""
        annotation_objs = PredictionReview.objects.filter(prediction=prediction, reviewer=reviewer)
        if annotation_objs.count() > 0:
            logger.info('There are already %d reviews for prediction %s assigned to user %s',
                        annotation_objs.count(), prediction.label, reviewer.username)
            return True
        return False

    def _create_prediction_annotation(self, prediction, reviewer, allow_duplicated):
        """Create a PredictionReview; return a report dict, or None if skipped."""
        if not allow_duplicated and self._check_duplicated(prediction, reviewer):
            return None
        prev_obj = PredictionReview(
            label=uuid4().hex,
            prediction=prediction,
            slide=prediction.slide,
            reviewer=reviewer
        )
        prev_obj.save()
        return {
            'review_id': prev_obj.id,
            'slide': prev_obj.slide.id,
            'prediction': prev_obj.prediction.label,
            'review_label': prev_obj.label,
            'reviewer': prev_obj.reviewer.username
        }

    def create_random_worklist(self, prediction_type, allow_duplicated, report_file=None):
        """Assign reviews round-robin across the configured prediction managers."""
        logger.info('Creating RANDOM worklist')
        prediction_rev_managers = self._get_prediction_reviews_manager_users()
        if len(prediction_rev_managers) < 1:
            raise CommandError('No prediction managers configured')
        predictions = self._get_predictions_list(prediction_type)
        for i, pred in enumerate(predictions):
            logger.info('Processing prediction %s', pred.label)
            pred_report = self._create_prediction_annotation(pred,
                                                             prediction_rev_managers[i % len(prediction_rev_managers)],
                                                             allow_duplicated)
            if report_file and pred_report:
                report_file.writerow(pred_report)

    def create_worklist_from_file(self, worklist_file, prediction_type, allow_duplicated, report_file=None):
        raise NotImplementedError()

    def handle(self, *args, **opts):
        logger.info('=== Starting Predictions Reviews worklist creation ===')
        worklist_file = opts['worklist']
        allow_duplicated = opts['allow_duplicated']
        report_file = None
        report_writer = None
        if opts['report_file']:
            report_file = open(opts['report_file'], 'w')
            report_writer = DictWriter(report_file,
                                       ['review_id', 'review_label', 'slide', 'prediction', 'reviewer'])
            report_writer.writeheader()
        try:
            if worklist_file:
                self.create_worklist_from_file(worklist_file, opts['prediction_type'], allow_duplicated, report_writer)
            else:
                self.create_random_worklist(opts['prediction_type'], allow_duplicated, report_writer)
        except CommandError as cme:
            logger.error('A problem occurred while building the worklist, exit')
            sys.exit(cme)
        finally:
            # BUGFIX: the report file used to leak when a CommandError (or any
            # unexpected exception) aborted the run before close() was reached.
            if report_file is not None:
                report_file.close()
        logger.info('=== Prediction Reviews worklist creation completed ===')
|
fo_by_enp, match_patient_by_snils, get_dn_info_by_enp
from users.models import DoctorProfile
from utils.common import values_as_structure_data
from utils.data_verification import data_parse
from utils.dates import normalize_date, valid_date, try_strptime
from utils.xh import check_type_research, short_fio_dots
from . import sql_if
from directions.models import DirectionDocument, DocumentSign, Napravleniya
from .models import CrieOrder, ExternalService
from laboratory.settings import COVID_RESEARCHES_PK
from .utils import get_json_protocol_data, get_json_labortory_data, check_type_file
from django.contrib.auth.models import User
logger = logging.getLogger("IF")
@api_view()
def next_result_direction(request):
    """Return the next batch of confirmed direction ids after a given date."""
    query = request.GET
    from_pk = query.get("fromPk")
    after_date = query.get("afterDate")
    only_signed = query.get("onlySigned")
    if after_date == '0':
        after_date = AFTER_DATE
    next_n = int(query.get("nextN", 1))
    type_researches = query.get("research", '*')
    d_start = f'{after_date}'
    is_research = 1
    researches = [-999]
    if type_researches == 'lab':
        researches = [x.pk for x in Researches.objects.filter(podrazdeleniye__p_type=Podrazdeleniya.LABORATORY)]
    elif type_researches != '*':
        researches = [int(part) for part in type_researches.split(',')]
    else:
        is_research = -1
    if only_signed == '1':
        # TODO: return only fully signed directions and use the signing
        # datetime (eds_total_signed_at) as next_time instead of the
        # confirmation datetime.
        dirs = sql_if.direction_collect(d_start, researches, is_research, next_n) or []
    else:
        dirs = sql_if.direction_collect(d_start, researches, is_research, next_n) or []
    naprs = [row[0] for row in dirs]
    next_time = dirs[-1][3] if dirs else None
    return Response({"next": naprs, "next_time": next_time, "n": next_n, "fromPk": from_pk, "afterDate": after_date})
@api_view()
def get_dir_amd(request):
    """Return up to nextN direction ids that must be resent to AMD."""
    next_n = int(request.GET.get("nextN", 5))
    pending = sql_if.direction_resend_amd(next_n)
    if not pending:
        return Response({"ok": False, "next": []})
    return Response({"ok": True, "next": [row[0] for row in pending]})
@api_view()
def get_dir_n3(request):
    """Return up to nextN direction ids that must be resent to N3."""
    next_n = int(request.GET.get("nextN", 5))
    pending = sql_if.direction_resend_n3(next_n)
    if not pending:
        return Response({"ok": False, "next": []})
    return Response({"ok": True, "next": [row[0] for row in pending]})
@api_view()
def resend_dir_l2(request):
    """Return up to nextN direction ids that must be resent to L2."""
    next_n = int(request.GET.get("nextN", 5))
    pending = sql_if.direction_resend_l2(next_n)
    if not pending:
        return Response({"ok": False, "next": []})
    return Response({"ok": True, "next": [row[0] for row in pending]})
@api_view()
def resend_dir_crie(request):
    """Return up to nextN direction ids that must be resent to CRIE."""
    next_n = int(request.GET.get("nextN", 5))
    pending = sql_if.direction_resend_crie(next_n)
    if not pending:
        return Response({"ok": False, "next": []})
    return Response({"ok": True, "next": [row[0] for row in pending]})
@api_view()
def result_amd_send(request):
    """Record AMD delivery outcomes: flag failed and acknowledge sent directions."""
    result = json.loads(request.GET.get("result"))
    resp = {"ok": False}
    if result['error']:
        # Failed deliveries: stop resending, mark the AMD error flag.
        for entry in result['error']:
            failed_pk = int(entry.split(':')[0])
            directions.Napravleniya.objects.filter(pk=failed_pk).update(need_resend_amd=False, error_amd=True)
        resp = {"ok": True}
    if result['send']:
        # Successful deliveries: store the assigned AMD number.
        for entry in result['send']:
            parts = entry.split(':')
            directions.Napravleniya.objects.filter(pk=int(parts[0])).update(need_resend_amd=False, amd_number=parts[1], error_amd=False)
        resp = {"ok": True}
    return Response(resp)
@api_view()
def direction_data(request):
    """Serialize one direction (order) with patient, issledovaniya and, when
    fully signed, its signed documents.

    Query params: `pk` (direction id), `research` ('*' or comma-separated
    research pks to filter the confirmed issledovaniya by).
    """
    pk = request.GET.get("pk")
    research_pks = request.GET.get("research", '*')
    direction: directions.Napravleniya = directions.Napravleniya.objects.select_related('istochnik_f', 'client', 'client__individual', 'client__base').get(pk=pk)
    card = direction.client
    individual = card.individual
    # Only confirmed issledovaniya are exported.
    iss = directions.Issledovaniya.objects.filter(napravleniye=direction, time_confirmation__isnull=False).select_related('research', 'doc_confirmation')
    if research_pks != '*':
        iss = iss.filter(research__pk__in=research_pks.split(','))
    if not iss:
        return Response({"ok": False})
    # NOTE(review): a random confirmed issledovaniye supplies the shared
    # time/doctor fields below — presumably they are equivalent across the
    # direction; confirm.
    iss_index = random.randrange(len(iss))
    signed_documents = []
    if direction.eds_total_signed:
        # Collect the latest generation of documents plus their signatures.
        last_time_confirm = direction.last_time_confirm()
        for d in DirectionDocument.objects.filter(direction=direction, last_confirmed_at=last_time_confirm):
            document = {
                'type': d.file_type.upper(),
                'content': base64.b64encode(d.file.read()).decode('utf-8'),
                'signatures': [],
            }
            for s in DocumentSign.objects.filter(document=d):
                document['signatures'].append(
                    {
                        "content": s.sign_value.replace('\n', ''),
                        "type": s.sign_type,
                        "executor": s.executor.uploading_data,
                    }
                )
            signed_documents.append(document)
    return Response(
        {
            "ok": True,
            "pk": pk,
            "createdAt": direction.data_sozdaniya,
            "patient": {
                **card.get_data_individual(full_empty=True, only_json_serializable=True),
                "family": individual.family,
                "name": individual.name,
                "patronymic": individual.patronymic,
                "birthday": individual.birthday,
                "docs": card.get_n3_documents(),
                "sex": individual.sex,
                "card": {
                    "base": {"pk": card.base_id, "title": card.base.title, "short_title": card.base.short_title},
                    "pk": card.pk,
                    "number": card.number,
                    "n3Id": card.n3_id,
                    "numberWithType": card.number_with_type(),
                },
            },
            "issledovaniya": [x.pk for x in iss],
            "timeConfirmation": iss[iss_index].time_confirmation,
            "timeTube": iss[iss_index].material_date,
            "docLogin": iss[iss_index].doc_confirmation.rmis_login if iss[iss_index].doc_confirmation else None,
            "docPassword": iss[iss_index].doc_confirmation.rmis_password if iss[iss_index].doc_confirmation else None,
            "department_oid": iss[iss_index].doc_confirmation.podrazdeleniye.oid if iss[iss_index].doc_confirmation else None,
            "finSourceTitle": direction.istochnik_f.title if direction.istochnik_f else 'другое',
            "finSourceCode": direction.istochnik_f.get_n3_code() if direction.istochnik_f else '6',
            "oldPk": direction.core_id,
            "isExternal": direction.is_external,
            "titleInitiator": direction.get_title_org_initiator(),
            "ogrnInitiator": direction.get_ogrn_org_initiator(),
            "titleLaboratory": direction.hospital_title.replace("\"", " "),
            "ogrnLaboratory": direction.hospital_ogrn,
            "hospitalN3Id": direction.hospital_n3id,
            "signed": direction.eds_total_signed,
            "totalSignedAt": direction.eds_total_signed_at,
            "signedDocuments": signed_documents,
            "REGION": REGION,
            "DEPART": CENTRE_GIGIEN_EPIDEMIOLOGY,
            "hasN3IemkUploading": direction.n3_iemk_ok,
        }
    )
def format_time_if_is_not_none(t):
    """Format a datetime as 'YYYY-MM-DD HH:MM'; pass falsy values through as None."""
    return "{:%Y-%m-%d %H:%M}".format(t) if t else None
@api_view()
def issledovaniye_data(request):
pk = request.GET.get("pk")
ignore_sample = request.GET.get("ignoreSample") == 'true'
i = directions.Issledovaniya.objects.get(pk=pk)
sample = directions.TubesRegistration.objects.filter(issledovaniya=i, time_get__isnull=False).first()
results = directions.Result.objects.filter(issledovaniye=i, fraction__fsli__isnull=False)
if (not ignore_sample and n |
cation.address)
self.assertEqual(10.0, loc[0].location.latitude)
self.assertEqual(10.0, loc[0].location.longitude)
alchemy.match(loc[0], session, {"city": u"Frisco"}, keepexisting=True)
self.assertEqual("Frisco, OH, US", loc[0].location.address)
self.assertEqual(10.0, loc[0].location.latitude)
self.assertEqual(10.0, loc[0].location.longitude)
    def test_unmatch_asgloc(self):
        """Unmatch raw assignees/locations and verify the cleaned records and
        location-assignee link table shrink accordingly.

        NOTE(review): the literal counts (19/5/4/...) come from the fixture
        database loaded by the test harness — confirm against that data set.
        """
        loc = session.query(RawLocation).limit(20)
        asg = session.query(RawAssignee).limit(20)
        alchemy.match(asg, session)
        # Group the 20 raw locations into four clean Location records.
        alchemy.match(loc[0:5], session)
        alchemy.match(loc[5:10], session)
        alchemy.match(loc[10:15], session)
        alchemy.match(loc[15:20], session)
        clean = asg[0].assignee
        alchemy.unmatch(asg[0], session)
        self.assertEqual(None, asg[0].assignee)
        self.assertEqual(19, len(clean.rawassignees))
        self.assertEqual(19, len(clean.patents))
        self.assertEqual(4, session.query(Location).count())
        self.assertEqual(4, session.query(locationassignee).count())
        clean = loc[0].location
        self.assertEqual(5, len(clean.rawlocations))
        # Unmatching raw locations one by one shrinks the clean record...
        alchemy.unmatch(loc[0], session)
        self.assertEqual(4, len(clean.rawlocations))
        alchemy.unmatch(loc[1], session)
        self.assertEqual(3, len(clean.rawlocations))
        # ...and unmatching the rest of the group removes it entirely.
        alchemy.unmatch(loc[2:5], session)
        self.assertEqual(None, loc[0].location)
        self.assertEqual(3, session.query(Location).count())
        self.assertEqual(3, session.query(locationassignee).count())
        alchemy.unmatch(loc[5].location, session)
        self.assertEqual(2, session.query(Location).count())
        self.assertEqual(2, session.query(locationassignee).count())
        alchemy.unmatch(asg[3:20], session)
        alchemy.unmatch(loc[10].location, session)
        self.assertEqual(1, session.query(Location).count())
        self.assertEqual(0, session.query(locationassignee).count())
    def test_unmatch_invloc(self):
        """Same scenario as test_unmatch_asgloc, but for inventors and the
        location-inventor link table.

        NOTE(review): counts depend on the fixture database loaded by the
        test harness.
        """
        loc = session.query(RawLocation).limit(20)
        inv = session.query(RawInventor).limit(20)
        alchemy.match(inv, session)
        # Group the 20 raw locations into four clean Location records.
        alchemy.match(loc[0:5], session)
        alchemy.match(loc[5:10], session)
        alchemy.match(loc[10:15], session)
        alchemy.match(loc[15:20], session)
        clean = inv[0].inventor
        alchemy.unmatch(inv[0], session)
        self.assertEqual(None, inv[0].inventor)
        self.assertEqual(19, len(clean.rawinventors))
        self.assertEqual(10, len(clean.patents))
        self.assertEqual(4, session.query(Location).count())
        self.assertEqual(4, session.query(locationinventor).count())
        clean = loc[0].location
        self.assertEqual(5, len(clean.rawlocations))
        alchemy.unmatch(loc[0], session)
        self.assertEqual(4, len(clean.rawlocations))
        alchemy.unmatch(loc[1], session)
        self.assertEqual(3, len(clean.rawlocations))
        alchemy.unmatch(loc[2:5], session)
        self.assertEqual(None, loc[0].location)
        self.assertEqual(3, session.query(Location).count())
        self.assertEqual(3, session.query(locationinventor).count())
        clean = inv[5].inventor
        alchemy.unmatch(inv[1], session)
        self.assertEqual(None, inv[1].inventor)
        self.assertEqual(18, len(clean.rawinventors))
        # this patent is repeated
        self.assertEqual(10, len(clean.patents))
        alchemy.unmatch(inv[2], session)
        self.assertEqual(None, inv[2].inventor)
        self.assertEqual(17, len(clean.rawinventors))
        self.assertEqual(9, len(clean.patents))
        alchemy.unmatch(loc[5].location, session)
        self.assertEqual(2, session.query(Location).count())
        self.assertEqual(2, session.query(locationinventor).count())
        alchemy.unmatch(inv[3:20], session)
        alchemy.unmatch(loc[10].location, session)
        self.assertEqual(1, session.query(Location).count())
        self.assertEqual(0, session.query(locationinventor).count())
    def test_unmatch_lawyer(self):
        """Unmatching one raw lawyer clears its link and shrinks the clean
        Lawyer record shared by the remaining raw rows.

        NOTE(review): counts come from the fixture database.
        """
        law = session.query(RawLawyer).limit(20)
        alchemy.match(law, session)
        alchemy.unmatch(law[0], session)
        self.assertEqual(None, law[0].lawyer)
        self.assertEqual(19, len(law[1].lawyer.rawlawyers))
        self.assertEqual(14, len(law[1].lawyer.patents))
def t | est_assigneematch(self):
# blindly assume first 10 are the same
asg0 = session.query(RawAssignee).limit(10)
asg1 = session.query(RawAssignee).limit(10).offset(10)
asgs = session.query(Assignee)
alchemy.match(asg0, session)
alchemy.match(asg1, session)
# create two items
self.assertEqual(10, len(asg0[0].assignee.rawassignees))
self.assertEqual(10, len(asg1[0].assignee.rawassignees))
self.assertEqual(10, len(asg0[0].assignee.patents))
self.assertEqual(2, asgs.count())
self.assertEqual("CAFEPRESS.COM", asg0[0].assignee.organization)
# merge the assignees together
alchemy.match([asg0[0], asg1[0]], session)
self.assertEqual(20, len(asg0[0].assignee.rawassignees))
self.assertEqual(20, len(asg1[0].assignee.rawassignees))
self.assertEqual(20, len(asg0[0].assignee.patents))
self.assertEqual(1, asgs.count())
# override the default values provided
alchemy.match(asg0[0], session, {"organization": u"Kevin"})
self.assertEqual("Kevin", asg0[0].assignee.organization)
# determine the most common organization name
alchemy.match(session.query(RawAssignee).limit(40).all(), session)
self.assertEqual(40, len(asg1[0].assignee.rawassignees))
self.assertEqual("The Procter & Gamble Company", asg0[0].assignee.organization)
    def test_inventormatch(self):
        """Match raw inventors into clean records, merge two groups, override
        name fields, then let match() pick the most common full name.

        NOTE(review): expected strings/counts come from the fixture database.
        """
        # blindly assume first 10 are the same
        inv0 = session.query(RawInventor).limit(10)
        inv1 = session.query(RawInventor).limit(10).offset(10)
        invs = session.query(Inventor)
        alchemy.match(inv0, session)
        alchemy.match(inv1, session)
        # create two items
        self.assertEqual(10, len(inv0[0].inventor.rawinventors))
        self.assertEqual(10, len(inv1[0].inventor.rawinventors))
        self.assertEqual(2, invs.count())
        self.assertEqual(6, len(inv0[0].inventor.patents))
        self.assertEqual(5, len(inv1[0].inventor.patents))
        self.assertEqual("David C. Mattison", inv0[0].inventor.name_full)
        # merge the assignees together
        alchemy.match([inv0[0], inv1[0]], session)
        self.assertEqual(20, len(inv0[0].inventor.rawinventors))
        self.assertEqual(20, len(inv1[0].inventor.rawinventors))
        self.assertEqual(11, len(inv0[0].inventor.patents))
        self.assertEqual(1, invs.count())
        # override the default values provided
        alchemy.match(inv0[0], session, {"name_first": u"Kevin", "name_last": u"Yu"})
        self.assertEqual("Kevin Yu", inv0[0].inventor.name_full)
        # determine the most common organization name
        alchemy.match(session.query(RawInventor).all(), session)
        self.assertEqual(137, len(inv1[0].inventor.rawinventors))
        self.assertEqual("Robert Wang", inv0[0].inventor.name_full)
def test_lawyermatch(self):
# blindly assume first 10 are the same
law0 = session.query(RawLawyer).limit(10)
law1 = session.query(RawLawyer).limit(10).offset(10)
laws = session.query(Lawyer)
alchemy.match(law0, session)
alchemy.match(law1, session)
# create two items
self.assertEqual(10, len(law0[0].lawyer.rawlawyers))
self.assertEqual(10, len(law1[0].lawyer.rawlawyers))
self.assertEqual(2, laws.count())
self.assertEqual(7, len(law0[0].lawyer.patents))
self.assertEqual(9, len(law1[0].lawyer.patents))
self.assertEqual("Warner Norcross & Judd LLP", law0[0].lawyer.organization)
# merge the assignees together
alchemy.match([law0[0], law1[0]], session)
self.assertEqual(20, len(law0[0].lawyer.rawlawyers))
self.assertEqual(20, len(law1[0].lawyer.ra |
, target=column, body="I'm the second")
text_breadcrumbs = text_plugin.get_breadcrumb()
self.assertEqual(len(columns.get_breadcrumb()), 1)
self.assertEqual(len(column.get_breadcrumb()), 2)
self.assertEqual(len(text_breadcrumbs), 3)
self.assertTrue(text_breadcrumbs[0]['title'], columns.get_plugin_class().name)
self.assertTrue(text_breadcrumbs[1]['title'], column.get_plugin_class().name)
self.assertTrue(text_breadcrumbs[2]['title'], text_plugin.get_plugin_class().name)
self.assertTrue('/edit-plugin/%s/'% columns.pk in text_breadcrumbs[0]['url'])
self.assertTrue('/edit-plugin/%s/'% column.pk, text_breadcrumbs[1]['url'])
self.assertTrue('/edit-plugin/%s/'% text_plugin.pk, text_breadcrumbs[2]['url'])
def test_add_cancel_plugin(self):
"""
Test that you can cancel a new plugin before editing and
that the plugin is removed.
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
pk = CMSPlugin.objects.all()[0].pk
expected = {
"url": "/en/admin/cms/page/edit-plugin/%s/" % pk,
"breadcrumb": [
{
"url": "/en/admin/cms/page/edit-plugin/%s/" % pk,
"title": "Text"
}
],
'delete': '/en/admin/cms/page/delete-plugin/%s/' % pk
}
output = json.loads(response.content.decode('utf8'))
self.assertEqual(output, expected)
# now click cancel instead of editing
response = self.client.get(output['url'])
self.assertEqual(response.status_code, 200)
data = {
"body": "Hello World",
"_cancel": True,
}
response = self.client.post(output['url'], data)
self.assertEqual(response.status_code, 200)
self.assertEqual(0, Text.objects.count())
def test_extract_images_from_text(self):
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + "%s/" % CMSPlugin.objects.all()[0].pk
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
img_path = os.path.join(os.path.dirname(__file__), 'data', 'image.jpg')
with open(img_path, 'rb') as fobj:
img_data = base64.b64encode(fobj.read()).decode('utf-8')
body = """<p>
<img alt='' src='data:image/jpeg;base64,{data}' />
</p>""".format(data=img_data)
data = {
"body": body
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertTrue('id="plugin_obj_%s"' % (txt.pk + 1) in txt.body)
def test_add_text_plugin_empty_tag(self):
"""
Test that you can add a text plugin
"""
# add a new text plugin
page_data = self.get_new_page_data()
self.client.post(URL_CMS_PAGE_ADD, page_data)
page = Page.objects.all()[0]
plugin_data = {
'plugin_type': "TextPlugin",
'plugin_language': settings.LANGUAGES[0][0],
'placeholder_id': page.placeholders.get(slot="body").pk,
'plugin_parent': '',
}
response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
self.assertEqual(response.status_code, 200)
# now edit the plugin
edit_url = URL_CMS_PLUGIN_EDIT + "%s/" % CMSPlugin.objects.all()[0].pk
response = self.client.get(edit_url)
self.assertEqual(response.status_code, 200)
data = {
"body": '<div class="someclass"></div><p>foo</p>'
}
response = self.client.post(edit_url, data)
self.assertEqual(response.status_code, 200)
txt = Text.objects.all()[0]
self.assertEqual('<div class="someclass"></div><p>foo</p>', txt.body)
    def test_add_text_plugin_html_sanitizer(self):
        """
        Posting raw <script> markup through the plugin edit view stores it
        verbatim -- i.e. this asserts the sanitizer does NOT strip it here.
        """
        # add a new text plugin
        page_data = self.get_new_page_data()
        self.client.post(URL_CMS_PAGE_ADD, page_data)
        page = Page.objects.all()[0]
        plugin_data = {
            'plugin_type': "TextPlugin",
            'plugin_language': settings.LANGUAGES[0][0],
            'placeholder_id': page.placeholders.get(slot="body").pk,
            'plugin_parent': '',
        }
        response = self.client.post(URL_CMS_PLUGIN_ADD, plugin_data)
        self.assertEqual(response.status_code, 200)
        # now edit the plugin
        edit_url = URL_CMS_PLUGIN_EDIT + "%s/" % CMSPlugin.objects.all()[0].pk
        response = self.client.get(edit_url)
        self.assertEqual(response.status_code, 200)
        data = {
            "body": '<script>var bar="hacked"</script>'
        }
        response = self.client.post(edit_url, data)
        self.assertEqual(response.status_code, 200)
        txt = Text.objects.all()[0]
        # NOTE(review): the script tag is expected to be stored as-is --
        # presumably HTML sanitization is disabled in this configuration.
        self.assertEqual('<script>var bar="hacked"</script>', txt.body)
def test_copy_plugins_method(self):
"""
Test that CMSPlugin copy does not have side effects
"""
# create some objects
page_en = api.create_page("CopyPluginTestPage (EN)", "nav_playground.html", "en")
page_de = api.create_page("CopyPluginTestPage (DE)", "nav_playground.html", "de")
ph_en = page_en.placeholders.get(slot="body")
ph_de = page_de.placeholders.get(slot="body")
# add the text plugin
| text_plugin_en = api.add_plugin(ph_en, "TextPlugin", "en", body="Hello World")
self.assertEqual(text_plugin_en.pk, CMSPlugin.objects.all()[0].pk)
# add a *nested* link plugin
link_plugin_en = api.add_plugin(ph_en, "LinkPlugin", "en", target=text_plugin_en,
name="A Link", url="https://www.django-cms.org")
#
text_plugin_en.body += plugin_to_tag(link_plugin_en)
text_plugin_en.save()
# the call above to add a child makes a plugin reload required here.
text_plugin_en = self.reload(text_plugin_en)
# setup the plugins to copy
plugins = [text_plugin_en, link_plugin_en]
# save the old ids for check
old_ids = [plugin.pk for plugin in plugins]
new_plugins = []
plugins_ziplist = []
old_parent_cache = {}
# This is a stripped down version of cms.copy_plugins.copy_plugins_to
# to low-level testing the copy process
for plugin in plugins:
new_plugins.append(plugin.copy_plugin(ph_de, 'de', old_parent_cache))
plugins_ziplist.append((new_plugins[-1], plugin))
for idx, plugin in enumerate(plugins):
inst, _ = new_plugins[idx].get_plugin_instance()
new_plugins[idx] = inst
new_plugins[idx].post_copy(plugin, plugins_ziplist)
for idx, plugin in enumerate(plugins):
# original plugin instance reference should stay unmodified
self.assertEqual(old_ids[idx], plugin.pk)
# new plugin instance should be different from the original
self.assertNotEqual(new_plugins[idx], plugin.pk)
# text plug |
# -*- coding: utf-8 -*-
"""
Unit tests for embargo app admin forms.
"""
from __future__ import absolute_import
import six
# Explicitly import the cache from ConfigurationModel so we can reset it after each test
from config_models.models import cache
from django.test import TestCase
from opaque_keys.edx.locator import CourseLocator
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
from ..forms import IPFilterForm, RestrictedCourseForm
from ..models import IPFilter
class RestrictedCourseFormTest(ModuleStoreTestCase):
    """Test the course form properly validates course IDs"""

    def test_save_valid_data(self):
        # A course that exists in the modulestore must pass validation.
        new_course = CourseFactory.create()
        form_data = {
            'course_key': six.text_type(new_course.id),
            'enroll_msg_key': 'default',
            'access_msg_key': 'default'
        }
        self.assertTrue(RestrictedCourseForm(data=form_data).is_valid())

    def test_invalid_course_key(self):
        # Invalid format for the course key
        bad_form = RestrictedCourseForm(data={'course_key': 'not/valid'})
        self._assert_course_field_error(bad_form)

    def test_course_not_found(self):
        # Well-formed key, but no such course exists.
        missing_key = CourseLocator(org='test', course='test', run='test')
        self._assert_course_field_error(
            RestrictedCourseForm(data={'course_key': missing_key}))

    def _assert_course_field_error(self, form):
        """
        Validation shouldn't work.
        """
        self.assertFalse(form.is_valid())
        self.assertIn('COURSE NOT FOUND', form._errors['course_key'][0])  # pylint: disable=protected-access
        with self.assertRaisesRegexp(
            ValueError, "The RestrictedCourse could not be created because the data didn't validate."
        ):
            form.save()
class IPFilterFormTest(TestCase):
    """Test form for adding [black|white]list IP addresses"""

    def tearDown(self):
        super(IPFilterFormTest, self).tearDown()
        # Explicitly clear ConfigurationModel's cache so tests have a clear cache
        # and don't interfere with each other
        cache.clear()

    def test_add_valid_ips(self):
        # test adding valid ip addresses
        # should be able to do both ipv4 and ipv6
        # spacing should not matter
        form_data = {
            'whitelist': u'127.0.0.1, 2003:dead:beef:4dad:23:46:bb:101, 1.1.0.1/32, 1.0.0.0/24',
            'blacklist': u' 18.244.1.5 , 2002:c0a8:101::42, 18.36.22.1, 1.0.0.0/16'
        }
        form = IPFilterForm(data=form_data)
        self.assertTrue(form.is_valid())
        form.save()
        whitelist = IPFilter.current().whitelist_ips
        blacklist = IPFilter.current().blacklist_ips
        for addr in u'127.0.0.1, 2003:dead:beef:4dad:23:46:bb:101'.split(','):
            self.assertIn(addr.strip(), whitelist)
        for addr in u'18.244.1.5, 2002:c0a8:101::42, 18.36.22.1'.split(','):
            self.assertIn(addr.strip(), blacklist)
        # Network tests
        # ips not in whitelist network
        for addr in [u'1.1.0.2', u'1.0.1.0']:
            self.assertNotIn(addr.strip(), whitelist)
        # ips in whitelist network
        for addr in [u'1.1.0.1', u'1.0.0.100']:
            self.assertIn(addr.strip(), whitelist)
        # ips not in blacklist network
        for addr in [u'2.0.0.0', u'1.1.0.0']:
            self.assertNotIn(addr.strip(), blacklist)
        # ips in blacklist network
        for addr in [u'1.0.100.0', u'1.0.0.10']:
            self.assertIn(addr.strip(), blacklist)
        # Test clearing by adding an empty list is OK too
        form_data = {
            'whitelist': '',
            'blacklist': ''
        }
        form = IPFilterForm(data=form_data)
        self.assertTrue(form.is_valid())
        form.save()
        self.assertEqual(len(IPFilter.current().whitelist), 0)
        self.assertEqual(len(IPFilter.current().blacklist), 0)

    def test_add_invalid_ips(self):
        # test adding invalid ip addresses
        form_data = {
            'whitelist': u'.0.0.1, :dead:beef:::, 1.0.0.0/55',
            'blacklist': u' 18.244.* , 999999:c0a8:101::42, 1.0.0.0/'
        }
        form = IPFilterForm(data=form_data)
        self.assertFalse(form.is_valid())
        # The error message embeds the raw list repr, which differs between
        # Python 2 (u'...') and Python 3 ('...').
        if six.PY2:
            wmsg = "Invalid IP Address(es): [u'.0.0.1', u':dead:beef:::', u'1.0.0.0/55']" \
                   " Please fix the error(s) and try again."
        else:
            wmsg = "Invalid IP Address(es): ['.0.0.1', ':dead:beef:::', '1.0.0.0/55']" \
                   " Please fix the error(s) and try again."
        # assertEqual instead of the deprecated assertEquals alias
        self.assertEqual(wmsg, form._errors['whitelist'][0])  # pylint: disable=protected-access
        if six.PY2:
            bmsg = "Invalid IP Address(es): [u'18.244.*', u'999999:c0a8:101::42', u'1.0.0.0/']" \
                   " Please fix the error(s) and try again."
        else:
            bmsg = "Invalid IP Address(es): ['18.244.*', '999999:c0a8:101::42', '1.0.0.0/']" \
                   " Please fix the error(s) and try again."
        self.assertEqual(bmsg, form._errors['blacklist'][0])  # pylint: disable=protected-access
        # assertRaisesRegexp kept (not assertRaisesRegex) for Python 2.7 compat
        with self.assertRaisesRegexp(ValueError, "The IPFilter could not be created because the data didn't validate."):
            form.save()
|
"""Implementation of allocation API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import un | icode_literals
import logging
from treadmill import discovery
from treadmill import context
_LOGGER = logging.getLogger(__name__)
class API:
    """Treadmill Local REST api."""

    def __init__(self):
        def _resolve(hostname):
            """Get hostname nodeinfo endpoint info."""
            _LOGGER.info('Redirect: %s', hostname)
            # Ask discovery for the host's nodeinfo endpoints.
            endpoints = discovery.iterator(
                context.GLOBAL.zk.conn,
                'root.%s' % hostname, 'nodeinfo', False
            )
            # Return the first endpoint that actually has a host:port.
            for _app, hostport in endpoints:
                if hostport:
                    _LOGGER.info('Found: %s - %s', hostname, hostport)
                    return hostport
            _LOGGER.info('nodeinfo not found: %s', hostname)
            return None

        self.get = _resolve
|
#----------------------------------------------------------------------
#This utility sets up the python configuration files so as to
#allow Python to find files in a specified directory, regardless
#of what directory the user is working from. This is typically
#used to create a directory where the user will put resources shared
#by many Python scripts, such as courseware modules
#
#----------------------------------------------------------------------
#Usage:
# (1) Put a copy of this file (setpath.py) in the directory
# you want to share
#
# (2) Execute setpath.py, either by opening it and running it
# in Canopy, or from the command line by changing directory
# to the directory you want to share and then typing
# python setpath.py
# If you run it by opening it in the Canopy editor you need to
# select the directory popup menu item that tells Canopy to
# change the working directory to the Editor directory.
# in Canopy, the working directory always appears at the upper
# right corner of the Python interpreter window.
#
#----------------------------------------------------------------------
#Notes:
#
# This will create a startup file which will properly
# initialize ipython (whether used directly or via Enthought
# Canopy) to find your files, and will do that regardless
# of your operating system.
#
# If you are using a Linux or Mac OSX operating system, it
# will also edit your .cshrc and .bash_profile shell startup
# scripts to set the environment variable PYTHONPATH so that
# any version of the python interperter started from the
# command line (i.e. whether ipython or python) will find
# the shared files. This feature will not work on
# Windows operating systems, so Windows users should
# either start up python by clicking on the Canopy app, or
# by starting ipython from the command line. It is possible
# to set the PYTHONPATH environment variable in Windows,
# but this script does not yet implement that feature.
#
# Note that it is also possible to manually set up a temporary
# shared path (for example /home/MyModules) in a given script
# by executing the lines:
#
# import sys
# sys.path.append('home/MyModules')
#
# where you would replace '/home/MyModules') with the
# actual full path to the directory you want on your own
# system
#----------------------------------------------------------------------
import os,glob,platform
#Utility function to return an acceptable filename for the
#startup file
def makeFileName(startupDir):
    """Return a path of the form NN-startup.py inside startupDir that does
    not collide with any existing .py file there."""
    existing = glob.glob(os.path.join(startupDir, '*.py'))
    #Try two-digit names (00-99) first, then four-digit ones; stop at the
    #first name that is free
    for i in range(10000):
        stem = '%02d-startup.py' % i if i < 100 else '%04d-startup.py' % i
        fname = os.path.join(startupDir, stem)
        if fname not in existing:
            break
    return fname
#
#--------Main program starts here
#
#Get current path
curPath = os.getcwd()
#Get home directory
home = os.path.expanduser('~')
#
#If this is a Linux or Mac OS X system, edit the
#shell initialization files to set the PYTHONPATH environment
#variable
if ( (platform.system()=='Darwin') or ('inux' in platform.system())):
    #We are on a Linux or Mac system. Edit Shell startup files
    #print() as a function so the script also runs under Python 3
    print('This is a Linux or Mac system. Adding path to shell startup scripts')
    #
    #csh script: (Note, should also do this for .tcshrc if it exists)
    cshFile = os.path.join(home,'.cshrc')
    print('csh family -- Editing '+cshFile)
    #Make backup copy of file
    os.system('cp %s %s'%(cshFile,cshFile+'.setPathBackup'))
    #Append line to set PYTHONPATH; 'with' guarantees the file is closed
    with open(cshFile,'a') as outfile:
        outfile.write('#Line added by setPath.py. Original in %s\n'%(cshFile+'.setPathBackup'))
        #Note: the double quotes allow paths to contain spaces
        outfile.write('setenv PYTHONPATH \"%s:$PYTHONPATH\"\n'%curPath)
    #
    #bash script (ToDo: also edit .profile, for sh users)
    bashFile = os.path.join(home,'.bash_profile')
    print('sh family -- Editing '+bashFile)
    #Make backup copy of file
    os.system('cp %s %s'%(bashFile,bashFile+'.setPathBackup'))
    #Append line to set PYTHONPATH
    with open(bashFile,'a') as outfile:
        outfile.write('#Line added by setPath.py. Original in %s\n'%(bashFile+'.setPathBackup'))
        #Note: the double quotes allow paths to contain spaces
        outfile.write('export PYTHONPATH=\"%s:$PYTHONPATH\"\n'%curPath)
#
#
#Set paths for ipython startup. This takes care of starting up ipython from
#double-clicking the Canopy app on any operating system
#
profilepath = os.path.join(home,'.ipython/profile_default/startup')
if os.path.isdir(profilepath):
    fname = makeFileName(profilepath)
else:
    print("Could not find .ipython startup directory. Exiting.")
    exit(1)
#
#Write the startup file
contents = 'import sys \nsys.path.append(\'%s\')\n'%curPath
with open(fname,'w') as outfile:
    outfile.write(contents)
import subprocess
import os
class CommandRunner:
    """Runs a shell command either locally or over SSH, depending on host.

    All run_* methods return a tuple ``(is_successful, output)`` where
    ``output`` is a list of output lines (or, on an internal exception in
    the split-command path, the exception text as a string).
    """

    # Hostnames that are always executed locally, in addition to whatever
    # local_hostname was passed to the constructor.
    HOST_LIST_TO_RUN_LOCAL = ["localhost", "127.0.0.1"]

    def __init__(self, local_hostname, logger):
        logger.debug("Creating CommandRunner with Args - local_hostname: {local_hostname}, logger: {logger}".format(**locals()))
        self.local_hostname = local_hostname
        self.logger = logger

    # returns: is_successful, output
    def run_command(self, host, base_command):
        """Dispatch base_command to the local shell or to SSH based on host."""
        self.logger.debug("Running Command: " + str(base_command))
        if host == self.local_hostname or host in self.HOST_LIST_TO_RUN_LOCAL:
            return self._run_local_command(base_command)
        else:
            return self._run_ssh_command(host, base_command)

    # This will start the process up as a child process. Meaning if the scheduler_failover_controller fails the child process will fail as well. (unless you're running the systemctl command)
    def _run_local_command(self, base_command):
        # NOTE: success is always reported as True here; os.popen gives no
        # exit status through read(). If there is no output at all the
        # second element is the empty string, not a list.
        self.logger.debug("Running command as Local command")
        output = os.popen(base_command).read()
        if output:
            output = output.split("\n")
        self.logger.debug("Run Command output: " + str(output))
        return True, output

    def _run_ssh_command(self, host, base_command):
        self.logger.debug("Running command as SSH command")
        # sudo needs a pseudo-terminal on the remote side, hence -tt
        if base_command.startswith("sudo"):
            command_split = ["ssh", "-tt", host, base_command]
        else:
            command_split = ["ssh", host, base_command]
        return self._run_split_command(
            command_split=command_split
        )

    def _run_split_command(self, command_split):
        """Run an argv-style command; collect stderr then stdout lines."""
        self.logger.debug("Running command_split: " + str(command_split))
        is_successful = True
        output = []
        try:
            process = subprocess.Popen(command_split, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            # communicate() drains both pipes concurrently, avoiding the
            # deadlock that wait() + readlines() can hit when a pipe buffer
            # fills up before the process exits.
            stdout_data, stderr_data = process.communicate()
            if stderr_data:
                stderr_text = stderr_data.decode() if isinstance(stderr_data, bytes) else stderr_data
                stderr_lines = stderr_text.splitlines(True)
                output += stderr_lines
                self.logger.debug("Run Command stderr output: " + str(stderr_lines))
            if stdout_data:
                stdout_text = stdout_data.decode() if isinstance(stdout_data, bytes) else stdout_data
                output += stdout_text.splitlines(True)
            if process.returncode != 0:
                # logger.warning instead of the deprecated logger.warn alias
                self.logger.warning("Process returned code '" + str(process.returncode) + "'")
                is_successful = False
        except Exception as e:
            is_successful = False
            output = str(e)
        self.logger.debug("Run Command output: " + str(output))
        return is_successful, output
|
import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
# Base REST URI for FELB service configuration endpoints.
module_uri = "/isam/felb/configuration/services/"
# Requirement metadata passed through to every invoke_* call below.
requires_modules = None
requires_versions = None
requires_model = "Appliance"
def add(isamAppliance, service_name, name, value, check_mode=False, force=False):
    """
    Creates a service attribute
    """
    found, warnings = _check(isamAppliance, service_name, name)
    # Only POST when forced, or when the attribute does not exist yet.
    if not (force is True or found is False):
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    return isamAppliance.invoke_post(
        "Creating a service attribute",
        "{0}{1}/attributes".format(module_uri, service_name),
        {
            "name": name,
            "value": value
        }, requires_version=requires_versions,
        requires_modules=requires_modules, requires_model=requires_model)
def delete(isamAppliance, service_name, attribute_name, check_mode=False, force=False):
    """
    deletes a service level attribute
    """
    found, warnings = _check(isamAppliance, service_name, attribute_name)
    # Nothing to delete unless the attribute exists (or force is set).
    if not (force is True or found is True):
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    return isamAppliance.invoke_delete(
        "Deleting a service attribute",
        "{0}{1}/attributes/{2}".format(module_uri, service_name, attribute_name),
        requires_version=requires_versions,
        requires_modules=requires_modules, requires_model=requires_model)
def get(isamAppliance, service_name, attribute_name):
    """
    Retrieving a service attribute
    """
    attribute_uri = "{0}{1}/attributes/{2}".format(module_uri, service_name, attribute_name)
    return isamAppliance.invoke_get("Retrieving a service attribute",
                                    attribute_uri,
                                    requires_version=requires_versions,
                                    requires_modules=requires_modules,
                                    requires_model=requires_model)
def get_all(isamAppliance, service_name):
    """
    Retrieving service attribute names
    """
    listing_uri = "{0}{1}/attributes?includeAllValues=true".format(module_uri, service_name)
    return isamAppliance.invoke_get("Retrieving service attribute names",
                                    listing_uri,
                                    requires_version=requires_versions,
                                    requires_modules=requires_modules,
                                    requires_model=requires_model)
def update(isamAppliance, service_name, attribute_name, attribute_value, check_mode=False, force=False):
    """
    Updating a service attribute
    """
    needs_update, warnings = _check_add(isamAppliance, service_name, attribute_name, attribute_value)
    # Only PUT when forced, or when the stored value differs / is missing.
    if not (force is True or needs_update is True):
        return isamAppliance.create_return_object(warnings=warnings)
    if check_mode is True:
        return isamAppliance.create_return_object(changed=True, warnings=warnings)
    return isamAppliance.invoke_put(
        "Updating a service attribute",
        "{0}{1}/attributes/{2}".format(module_uri, service_name, attribute_name),
        {
            "value": attribute_value
        },
        requires_modules=requires_modules, requires_version=requires_versions,
        requires_model=requires_model)
def set(isamAppliance, service_name, attribute_name, attribute_value, check_mode=False, force=False):
    """
    Determines if add or update is called
    """
    found, _ = _check(isamAppliance, service_name, attribute_name)
    # Missing attribute -> create it; existing attribute -> update it.
    handler = add if found is False else update
    return handler(isamAppliance, service_name, attribute_name, attribute_value, check_mode, force)
def compare(isamAppliance1, service_name1, isamAppliance2, service_name2):
    """
    Compare configuration between two appliances
    """
    return ibmsecurity.utilities.tools.json_compare(
        get_all(isamAppliance1, service_name1),
        get_all(isamAppliance2, service_name2),
        deleted_keys=[])
def _check_add(isamAppliance, service_name, name, value):
    """
    Idempotency test for the update function.

    Returns (True, warnings) when a write is needed: the attribute does not
    exist, or it exists with a different value. Returns (False, warnings)
    when the attribute already holds the requested value.
    """
    warnings = ""
    # check to see if attribute under service name exist, return True if it doesnt exist
    try:
        check_obj = get(isamAppliance, service_name, name)
        warnings = check_obj['warnings']
    except Exception:
        # except Exception (not bare except) so SystemExit/KeyboardInterrupt
        # still propagate; any API failure is treated as "does not exist".
        return True, warnings
    if 'value' in check_obj['data']:
        if check_obj['data']['value'] != value:
            return True, warnings
        return False, warnings
    return False, warnings
def _check(isamAppliance, service_name, attribute_name):
    """
    Checks to see if attribute exists

    Returns (True, warnings) when the attribute exists, otherwise
    (False, warnings).
    """
    warnings = ""
    try:
        check_obj = get(isamAppliance, service_name, attribute_name)
        warnings = check_obj['warnings']
    except Exception:
        # except Exception (not bare except) so SystemExit/KeyboardInterrupt
        # still propagate; any API failure is treated as "not found".
        return False, warnings
    if check_obj['data'] == {}:
        return False, warnings
    return True, warnings
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from py4j.protocol import Py4JJavaError
from pyspark import keyword_only
from pyspark.testing.utils import PySparkTestCase
class KeywordOnlyTests(unittest.TestCase):
    """Tests for the pyspark.keyword_only decorator."""
    class Wrapped(object):
        # keyword_only rejects positional args and records the supplied
        # keyword args in self._input_kwargs before calling the method.
        @keyword_only
        def set(self, x=None, y=None):
            if "x" in self._input_kwargs:
                self._x = self._input_kwargs["x"]
            if "y" in self._input_kwargs:
                self._y = self._input_kwargs["y"]
            return x, y
    def test_keywords(self):
        # Supplied keywords are recorded; omitted ones leave no attribute.
        w = self.Wrapped()
        x, y = w.set(y=1)
        self.assertEqual(y, 1)
        self.assertEqual(y, w._y)
        self.assertIsNone(x)
        self.assertFalse(hasattr(w, "_x"))
    def test_non_keywords(self):
        # Positional arguments must raise TypeError.
        w = self.Wrapped()
        self.assertRaises(TypeError, lambda: w.set(0, y=1))
    def test_kwarg_ownership(self):
        # test _input_kwargs is owned by each class instance and not a shared static variable
        class Setter(object):
            @keyword_only
            def set(self, x=None, other=None, other_x=None):
                if "other" in self._input_kwargs:
                    # Re-entrant call on another instance must not clobber
                    # this instance's _input_kwargs.
                    self._input_kwargs["other"].set(x=self._input_kwargs["other_x"])
                self._x = self._input_kwargs["x"]
        a = Setter()
        b = Setter()
        a.set(x=1, other=b, other_x=2)
        self.assertEqual(a._x, 1)
        self.assertEqual(b._x, 2)
class UtilTests(PySparkTestCase):
    """Tests for pyspark.util helpers that need a live SparkContext."""
    def test_py4j_exception_message(self):
        from pyspark.util import _exception_message
        # _exception_message must surface the underlying Java exception text.
        with self.assertRaises(Py4JJavaError) as context:
            # This attempts java.lang.String(null) which throws an NPE.
            self.sc._jvm.java.lang.String(None)
        self.assertTrue('NullPointerException' in _exception_message(context.exception))
    def test_parsing_version_string(self):
        from pyspark.util import VersionUtils
        # A string without a parseable major.minor part must be rejected.
        self.assertRaises(ValueError, lambda: VersionUtils.majorMinorVersion("abced"))
if __name__ == "__main__":
    # Re-import the tests so unittest discovers them when run as a script.
    from pyspark.tests.test_util import *
    try:
        # Emit JUnit-style XML reports when xmlrunner is available.
        import xmlrunner
        testRunner = xmlrunner.XMLTestRunner(output='target/test-reports')
    except ImportError:
        testRunner = None
    unittest.main(testRunner=testRunner, verbosity=2)
|
# -*- coding: utf-8 -*-
from harpia.model.connectionmodel import ConnectionModel as ConnectionModel
from harpia.system import System as Sys | tem
class DiagramModel(object):
    """Holds the state of one diagram: its blocks, connections and the
    editor bookkeeping (zoom, file name, modified flag, undo/redo stacks)."""
    # ----------------------------------------------------------------------
    def __init__(self):
        self.last_id = 1  # first block is n1, increments to each new block
        self.blocks = {}  # GUI blocks
        self.connectors = []  # connections between blocks
        self.zoom = 1.0  # pixels per unit
        self.file_name = "Untitled"
        self.modified = False
        self.language = None
        self.undo_stack = []
        self.redo_stack = []
    # ----------------------------------------------------------------------
    @property
    def patch_name(self):
        """The bare file name, i.e. file_name without any directory part."""
        return self.file_name.split("/")[-1]
    # ----------------------------------------------------------------------
|
# -*- coding: UTF-8 -*-
# Copyright 2019-2020 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from django.db import models
from lino_xl.lib.ledger.choicelists import VoucherStates
from lino.api import dd, _
class OrderStates(VoucherStates):
    """Workflow states for orders; items are registered via add_item below."""
    pass
# Shorthand for registering choice items on OrderStates.
add = OrderStates.add_item
# Arguments: value, translated text, attribute name; is_editable=True marks
# states in which the voucher may still be modified.
add('10', _("Waiting"), 'draft', is_editable=True)
add('20', _("Active"), 'active', is_editable=True)
add('30', _("Urgent"), 'urgent', is_editable=True)
add('40', _("Done"), 'registered')
add('50', _("Cancelled"), 'cancelled')
# Each state is reachable from all four other states.
OrderStates.draft.add_transition(required_states="active urgent registered cancelled")
OrderStates.active.add_transition(required_states="draft urgent registered cancelled")
OrderStates.urgent.add_transition(required_states="draft active registered cancelled")
OrderStates.registered.add_transition(required_states="draft active urgent cancelled")
OrderStates.cancelled.add_transition(required_states="draft active urgent registered")
|
)))
if update_ops_in_cross_replica_mode:
fetches += tuple(ops.get_collection(ops.GraphKeys.UPDATE_OPS))
return control_flow_ops.group(fetches)
iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn))
def run_step():
return distribution.run_steps_on_dataset(
step_fn, iterator, iterations=1).run_op
self.evaluate(distribution.initialize())
if not context.executing_eagerly():
with self.cached_session() as sess:
run_step = sess.make_callable(run_step())
self.evaluate(variables_lib.global_variables_initializer())
expected_moving_means = [0.] * 8
def averaged_batch_mean(i):
# Each batch has shape [16, 8] where the ith element in jth list is
# (8 * j + i + replica_id * 100). So the batch mean in each replica is
# (60 + i + replica_id * 100). So here comes its batch mean over all
# replicas:
return 60. + i + (num_replicas - 1.) / 2. * 100.
for _ in range(10):
run_step()
moving_means = self.evaluate(batchnorm.moving_mean)
# We make sure that the moving_mean is updated as if the sample mean is
# calculated over all replicas.
for i, expected_moving_mean in enumerate(expected_moving_means):
expected_moving_means[i] -= ((
expected_moving_mean - averaged_batch_mean(i)) * (1.0 - momentum))
self.assertNear(expected_moving_means[i], moving_means[i], 0.0001)
self.evaluate(distribution.finalize())
  @combinations.generate(
      combinations.times(
          combinations.combine(
              optimizer_fn=[
                  combinations.gradient_descent_optimizer_v1_fn,
                  combinations.gradient_descent_optimizer_v2_fn
              ],
              loss_reduction=[
                  losses_impl.Reduction.SUM, losses_impl.Reduction.MEAN,
                  losses_impl.Reduction.SUM_OVER_BATCH_SIZE,
                  losses_impl.Reduction.SUM_OVER_NONZERO_WEIGHTS
              ]),
          combinations.times(
              combinations.combine(
                  distribution=[
                      combinations.one_device_strategy,
                      combinations.mirrored_strategy_with_gpu_and_cpu,
                      combinations.mirrored_strategy_with_two_gpus,
                      combinations.core_mirrored_strategy_with_gpu_and_cpu,
                      combinations.core_mirrored_strategy_with_two_gpus
                  ]),
              combinations.combine(
                  mode=["graph"], use_callable_loss=[True, False]) +
              combinations.combine(mode=["eager"], use_callable_loss=[True])) +
          combinations.combine(
              distribution=[combinations.tpu_strategy],
              mode=["graph"],
              use_callable_loss=[True, False])))
  def testMeanVsSum(self, distribution, optimizer_fn, loss_reduction,
                    use_callable_loss):
    """Checks the trained weight after one step for each loss reduction."""
    with distribution.scope():
      # All replicas share one variable; collected here for the identity check.
      all_vars = []
      def model_fn(inputs):
        x, y = inputs
        def loss_fn():
          # Use fixed initialization to make the steps deterministic.
          w = variable_scope.get_variable("w", initializer=[[2.]])
          all_vars.append(w)
          predict = math_ops.matmul(x, w)
          return losses_impl.mean_squared_error(
              y, predict, reduction=loss_reduction)
        optimizer = optimizer_fn()  # GradientDescent with 0.2 learning rate
        if use_callable_loss:
          return optimizer.minimize(loss_fn)
        else:
          return optimizer.minimize(loss_fn())
      def dataset_fn():
        # One fixed batch of two examples, repeated forever.
        features = dataset_ops.Dataset.from_tensors([[2.], [7.]])
        labels = dataset_ops.Dataset.from_tensors([[6.], [21.]])
        return dataset_ops.Dataset.zip((features, labels)).repeat()
      def step_fn(ctx, inputs):
        del ctx  # Unused
        return distribution.group(
            distribution.call_for_each_replica(model_fn, args=(inputs,)))
      iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn))
      def run_step():
        return distribution.run_steps_on_dataset(
            step_fn, iterator, iterations=1).run_op
      self.evaluate(distribution.initialize())
      if not context.executing_eagerly():
        with self.cached_session() as sess:
          run_step = sess.make_callable(run_step())
      self.evaluate(variables_lib.global_variables_initializer())
      run_step()
      v = all_vars[0]
      # Every replica must have captured the very same variable object.
      self.assertTrue(all(v is vi for vi in all_vars[1:]))
      weight = numpy.squeeze(self.evaluate(v))
      # Our model is:
      #   predict = x * w
      #   loss = (predict - y)^2
      #   dloss/dpredict = 2*(predict - y)
      #   dloss/dw = 2 * x^T @ (predict - y)
      # For our batch size of 2, assuming sum loss reduction:
      #   x = [2, 7]
      #   y = [6, 21]
      #   w_initial = 2
      #   predict = [4, 14]
      #   predict - y = [-2, -7]
      #   dloss/dw = 2 <[2, 7], [-2, -7]> = - 2(4 + 49) = -106
      # So unreplicated the update to w with lr=0.2 is -0.2 * -106 = 21.2
      # with sum loss reduction, or 10.6 with mean.
      if loss_reduction == losses_impl.Reduction.SUM:
        # Note that the "distribution.num_replicas_in_sync" factor will go away
        # once we split the input across replicas, instead of pulling a complete
        # batch of input per replica.
        self.assertNear(weight, 2 + 21.2 * distribution.num_replicas_in_sync,
                        0.0001)
      else:
        # One of the mean loss reductions.
        self.assertNear(weight, 2 + 10.6, 0.0001)
      self.evaluate(distribution.finalize())
@combinations.generate(
combinations.times(
combinations.distributions_and_v1_optimizers(),
combinations.combine(mode=["graph", "eager"]),
combinations.combine(is_tpu=[False])) +
combinations.combine(
distribution=[combinations.tpu_strategy],
optimizer_fn=combinations.optimizers_v1,
mode=["graph"],
is_tpu=[True]))
def testRunStepsWithOutputContext(self, distribution, optimizer_fn, is_tpu):
with distribution.scope():
def dataset_fn():
dataset = dataset_ops.Dataset.from_tensors([[1.]]).repeat()
# TODO(priyag): batch with drop_remainder=True causes shapes to be
# fully defined for TPU. Remove this when XLA supports dynamic shapes.
return dataset.batch(batch_size=1, drop_remainder=True)
optimizer = optimizer_fn()
layer = core.Dense(1, use_bias=True)
key1 = "foo"
value1 = "bar"
def model_fn(output_context, x):
"""A very simple model written by the user."""
def loss_fn():
y = array_ops.reshape(layer(x), []) - constant_op.constant(1.)
return y * y
train_op = optimizer.minimize(loss_fn)
loss = loss_fn()
output_context.set_last_step_output(
name="replica_loss_reduced",
output=loss,
reduce_op=reduce_util.ReduceOp.MEAN)
output_context.set_non_tensor_output(key1, value1)
return (train_op, loss)
def step_fn(output_context, inputs):
(train_op, loss) = distribution.call_for_each_replica(
model_fn, args=(output_context, inputs))
output_context.set_last_step_output(
name="cross_replica_loss_reduced",
output=loss,
reduce_op=reduce_util.ReduceOp.MEAN)
output_context.set_last_step_output(
name="cross_replica_loss_not_reduced",
output=loss)
return distribution.group(train_op)
iterator = self._get_iterator(distribution.distribute_dataset(dataset_fn))
def run_step():
initial_loss = lambda: constant_op.constant(1e7)
# Initial values corresponding to reduced losses are just single
# tensors. But for non reduced losses, we need to have initial
# values that are of the same structure as non reduced losses. In
# MirroredStrategy, this will be a list of losses, in TPUStrategy
# it will be single tensor. Using `broadcast` foll |
from django.conf im | port settings
from django.utils.translation import gettext_lazy as _
def _from_settings(name, default):
    """Read *name* from Django settings, falling back to *default*."""
    return getattr(settings, name, default)


# Matching modes for SSO e-mail assignment rules.
ASSIGNMENT_ANY = 0
ASSIGNMENT_MATCH = 1
ASSIGNMENT_EXCEPT = 2

ASSIGNMENT_CHOICES = (
    (ASSIGNMENT_ANY, _("any")),
    (ASSIGNMENT_MATCH, _("matches")),
    (ASSIGNMENT_EXCEPT, _("don't match")),
)

DJANGO_ADMIN_SSO_ADD_LOGIN_BUTTON = _from_settings(
    "DJANGO_ADMIN_SSO_ADD_LOGIN_BUTTON", True
)

AUTH_USER_MODEL = _from_settings("AUTH_USER_MODEL", "auth.User")

# OAuth2 client credentials; must be provided by the project settings.
DJANGO_ADMIN_SSO_OAUTH_CLIENT_ID = _from_settings(
    "DJANGO_ADMIN_SSO_OAUTH_CLIENT_ID", None
)
DJANGO_ADMIN_SSO_OAUTH_CLIENT_SECRET = _from_settings(
    "DJANGO_ADMIN_SSO_OAUTH_CLIENT_SECRET", None
)

# Google OAuth2 endpoints (overridable via settings).
DJANGO_ADMIN_SSO_AUTH_URI = _from_settings(
    "DJANGO_ADMIN_SSO_AUTH_URI", "https://accounts.google.com/o/oauth2/auth"
)
DJANGO_ADMIN_SSO_TOKEN_URI = _from_settings(
    "DJANGO_ADMIN_SSO_TOKEN_URI", "https://accounts.google.com/o/oauth2/token"
)
DJANGO_ADMIN_SSO_REVOKE_URI = _from_settings(
    "DJANGO_ADMIN_SSO_REVOKE_URI", "https://accounts.google.com/o/oauth2/revoke"
)
|
#!/usr/bin/env python
#
# Copyright 2005,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General | Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest
import audio_alsa
class qa_alsa(gr_unittest.TestCase):
    # Smoke test: importing audio_alsa is all we can safely exercise.

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        # Drop the flow graph so resources are released between tests.
        self.tb = None

    def test_000_nop(self):
        """Just see if we can import the module...
        They may not have ALSA drivers, etc. Don't try to run anything"""
        pass


if __name__ == '__main__':
    gr_unittest.main()
|
# cod | ing=utf-8
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado | .ioloop import IOLoop
from app import app
if __name__ == "__main__":
    # Wrap the WSGI app so Tornado's event loop can serve it on port 5000.
    server = HTTPServer(WSGIContainer(app))
    server.listen(5000)
    IOLoop.instance().start()
|
and only behavior with templates.
template_exists = os.path.isfile(self.template)
if not template_exists and self._manifest_is_not_generated():
self.read_manifest()
self.filelist.sort()
self.filelist.remove_duplicates()
return
if not template_exists:
self.warn(("manifest template '%s' does not exist " +
"(using default file list)") %
self.template)
self.filelist.findall()
if self.use_defaults:
self.add_defaults()
if template_exists:
self.read_template()
if self.prune:
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def add_defaults(self):
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
standards = [('README', 'README.txt'), self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = False
for fn in alts:
if os.path.exists(fn):
got_it = True
self.filelist.append(fn)
break
if not got_it:
self.warn("standard file not found: should have one of " +
', '.join(alts))
else:
if os.path.exists(fn):
self.filelist.append(fn)
else:
self.warn("standard file '%s' not found" % fn)
optional = ['test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
self.filelist.extend(files)
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for pkg, src_dir, build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str): # plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else: # a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
def read_template(self):
"""Read and parse manifest template file named by self.template.
(usually "MANIFEST.in") The parsing and processing is done by
'self.filelist', which updates itself accordingly.
"""
log.info("reading manifest template '%s'", self.template)
template = TextFile(self.template, strip_comments=1, skip_blanks=1,
join_lines=1, lstrip_ws=1, rstrip_ws=1,
collapse_join=1)
try:
while True:
line = template.readline()
if line is None: # end of file
break
try:
self.filelist.process_template_line(line)
# the call above can raise a DistutilsTemplateError for
# malformed lines, or a ValueError from the lower-level
# convert_path function
except (DistutilsTemplateError, ValueError) as msg:
self.warn("%s, line %d: %s" % (template.filename,
template.current_line,
msg))
finally:
template.close()
def prune_file_list(self):
"""Prune off branches that might slip into the file list as created
by 'read_template()', but really don't belong there:
* the build tree (typically "build")
* the release tree itself (only an issue if we ran "sdist"
previously with --keep-temp, or it aborted)
* any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
"""
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=build.build_base)
self.filelist.exclude_pattern(None, prefix=base_dir)
if sys.platform == 'win32':
seps = r'/|\\'
else:
seps = '/'
vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr',
'_darcs']
vcs_ptrn = r'(^|%s)(%s)(%s).*' % (seps, '|'.join(vcs_dirs), seps)
self.filelist.exclude_pattern(vcs_ptrn, is_regex=1)
def write_manifest(self):
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
if self._manifest_is_not_generated():
log.info("not writing to manually maintained "
"manifest file '%s'" | % self.manifest)
return
content = self.filelist.files[:]
content.insert(0, '# file GENERATED by distutils, do NOT edit')
self.execute(file_util.write_file, (self.manifest, content),
| "writing manifest file '%s'" % self.manifest)
def _manifest_is_not_generated(self):
# check for special comment used in 3.1.3 and higher
if not os.path.isfile(self.manifest):
return False
fp = open(self.manifest)
try:
first_line = fp.readline()
finally:
fp.close()
return first_line != '# file GENERATED by distutils, do NOT edit\n'
def read_manifest(self):
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
manifest = open(self.manifest)
for line in manifest:
# ignore comments and blank lines
line = line.strip()
if line.startswith('#') or not line:
continue
self.filelist.append(line)
manifest.close()
def make_release_tree(self, base_dir, files):
"""Create the directory tree that will become the source
distribution archive. All directories implied by the filenames in
'files' are created under 'base_dir', |
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import re
import waffle
from djan | go.conf import settings
from django.shortcuts import redirect |
class SoftLaunchMiddleware(object):
    """Redirect visitors without the 'full_access' waffle flag away from
    everything except the redirect URL itself and a configured list of
    allowed path regexes."""

    def __init__(self):
        self.redirect_url = getattr(settings, 'SOFT_LAUNCH_REDIRECT_URL', '/')
        self.regexes = [
            re.compile(pattern)
            for pattern in getattr(settings, 'SOFT_LAUNCH_REGEXES', [])
        ]

    def process_view(self, request, view_func, view_args, view_kwargs):
        # Users with the flag see the full site.
        if waffle.flag_is_active(request, 'full_access'):
            return None
        path = request.path
        if path == self.redirect_url:
            return None
        if any(regex.match(path) for regex in self.regexes):
            return None
        return redirect(self.redirect_url)
|
# -*- coding: utf-8 -*- |
from __future__ import unicode_literals
from abc import ABCMeta, abstractmethod
from django.core.files imp | ort File
from six import with_metaclass
from django.utils.module_loading import import_string
from rest_framework_tus import signals
from .settings import TUS_SAVE_HANDLER_CLASS
class AbstractUploadSaveHandler(with_metaclass(ABCMeta, object)):
    """Base class for persisting a completed tus upload.

    Subclasses implement handle_save() and are expected to call finish()
    once the upload has been stored.
    """

    def __init__(self, upload):
        self.upload = upload

    @abstractmethod
    def handle_save(self):
        pass

    def run(self):
        # Move the upload into its "saving" state before doing any work.
        self.upload.start_saving()
        self.upload.save()
        # Delegate the actual persistence to the subclass.
        self.handle_save()

    def finish(self):
        # Notify listeners that the upload has been persisted.
        signals.saved.send(sender=self.__class__, instance=self)
        self.upload.finish()
        self.upload.save()
class DefaultSaveHandler(AbstractUploadSaveHandler):
    """Save handler that copies the temporary upload into a file field."""

    # Name of the model field that receives the uploaded file.
    destination_file_field = 'uploaded_file'

    def handle_save(self):
        # Save temporary field to file field.  Open in binary mode (uploads
        # are byte streams) and close the handle deterministically: the
        # original `File(open(...))` never closed the file and used the
        # platform-default text mode.
        file_field = getattr(self.upload, self.destination_file_field)
        with open(self.upload.temporary_file_path, 'rb') as source:
            file_field.save(self.upload.filename, File(source))
        # Finish upload
        self.finish()
def get_save_handler(import_path=None):
    """Resolve and return the save-handler class.

    Uses the dotted *import_path* when given, otherwise falls back to the
    TUS_SAVE_HANDLER_CLASS setting.
    """
    dotted_path = import_path or TUS_SAVE_HANDLER_CLASS
    return import_string(dotted_path)
|
[ {"desc": "desc", "range": [(2 << 31 + 1) - 1000000, (2 << 31 + 1)]}])
assert_raises_rpc_error(-8, "Range specified as [begin,end] must not have begin after end", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [2, 1]}])
assert_raises_rpc_error(-8, "Range is too large", self.nodes[0].scantxoutset, "start", [ {"desc": "desc", "range": [0, 1000001]}])
self.log.info("Test extended key derivation.")
# Run various scans, and verify that the sum of the amounts of the matches corresponds to the expected subset.
# Note that all amounts in the UTXO set are powers of 2 multiplied by 0.001 TURBO, so each amounts uniquely identifies a subset.
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/0h)"])['total_amount'], Decimal("0.008"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/1h)"])['total_amount'], Decimal("0.016"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500')"])['total_amount'], Decimal("0.032"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0h/0)"])['total_amount'], Decimal("0.064"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/1)"])['total_amount'], Decimal("0.128"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/1500)"])['total_amount'], Decimal("0.256"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*h)", "range": 1499}])['total_amount'], Decimal("0.024"))
assert_equal(self.no | des[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0'/*h)", "range": 1500}])['total_amount'], Decimal("0.056"))
assert_equal(self.nodes[0].scantxout | set("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])['total_amount'], Decimal("0.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0'/0h/*)", "range": 1500}])['total_amount'], Decimal("0.448"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0')"])['total_amount'], Decimal("0.512"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1')"])['total_amount'], Decimal("1.024"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1500h)"])['total_amount'], Decimal("2.048"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"])['total_amount'], Decimal("4.096"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1)"])['total_amount'], Decimal("8.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/1500)"])['total_amount'], Decimal("16.384"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/0)"])['total_amount'], Decimal("4.096"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo([abcdef88/1/2'/3/4h]tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1)"])['total_amount'], Decimal("8.192"))
assert_equal(self.nodes[0].scantxoutset("start", [ "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/1500)"])['total_amount'], Decimal("16.384"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*')", "range": 1499}])['total_amount'], Decimal("1.536"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*')", "range": 1500}])['total_amount'], Decimal("3.584"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)", "range": 1499}])['total_amount'], Decimal("12.288"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/*)", "range": 1500}])['total_amount'], Decimal("28.672"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1499}])['total_amount'], Decimal("12.288"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])['total_amount'], Decimal("28.672"))
assert_equal(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": [1500,1500]}])['total_amount'], Decimal("16.384"))
# Test the reported descriptors for a few matches
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/0h/0'/*)", "range": 1499}])), ["pkh([0c5f9a1e/0'/0'/0]026dbd8b2315f296d36e6b6920b1579ca75569464875c7ebe869b536a7d9503c8c)#dzxw429x", "pkh([0c5f9a1e/0'/0'/1]033e6f25d76c00bedb3a8993c7d5739ee806397f0529b1b31dda31ef890f19a60c)#43rvceed"])
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ "combo(tprv8ZgxMBicQKsPd7Uf69XL1XwhmjHopUGep8GuEiJDZmbQz6o58LninorQAfcKZWARbtRtfnLcJ5MQ2AtHcQJCCRUcMRvmDUjyEmNUWwx8UbK/1/1/0)"])), ["pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)#cxmct4w8"])
assert_equal(descriptors(self.nodes[0].scantxoutset("start", [ {"desc": "combo(tpubD6NzVbkrYhZ4WaWSyoBvQwbpLkojyoTZPRsgXELWz3Popb3qkjcJyJUGLnL4qHHoQvao8ESaAstxYSnhyswJ76uZPStJRJCTKvosUCJZL5B/1/1/*)", "range": 1500}])), ['pkh([0c5f9a1e/1/1/0]03e1c5b6e650966971d7e71ef2674f80222752740fc1dfd63bbbd220d2da9bd0fb)#cxmct4w8', 'pkh([0c5f9a1e/1/1/1500]03832901c250025da2aebae2bfb38d5c703a57ab66ad477f9c578bfbcd78abca6f)#vchwd07g', 'pkh([0c5f9a1e/1/1/1]030d820fc9e8211c4169be8530efbc632775d8286167afd178caaf1089b77daba7)#z2t3ypsa']) |
"""
Examples of Wavelets
--------------------
Figure 10.9
Wavelets for several values of wavelet par | ameters Q and f0. Solid lines show
the real part and dashed lines show the imaginary part (see eq. 10.16).
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.co | m
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from astroML.fourier import FT_continuous, IFT_continuous, sinegauss
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
#------------------------------------------------------------
# Set up the wavelets
t0 = 0
t = np.linspace(-0.4, 0.4, 10000)
f0 = np.array([5, 5, 10, 10])
Q = np.array([1, 0.5, 1, 0.5])

# compute wavelets all at once, broadcasting over the four (f0, Q) pairs
W = sinegauss(t, t0, f0[:, None], Q[:, None])

#------------------------------------------------------------
# Plot the wavelets: one panel per (f0, Q) combination
fig = plt.figure(figsize=(5, 3.75))
fig.subplots_adjust(hspace=0.05, wspace=0.05)

for panel, wavelet in enumerate(W):
    ax = fig.add_subplot(2, 2, panel + 1)
    # solid: real part; dashed: imaginary part
    ax.plot(t, wavelet.real, '-k')
    ax.plot(t, wavelet.imag, '--k')
    ax.text(0.04, 0.95, "$f_0 = %i$\n$Q = %.1f$" % (f0[panel], Q[panel]),
            ha='left', va='top', transform=ax.transAxes)
    ax.set_ylim(-1.2, 1.2)
    ax.set_xlim(-0.35, 0.35)
    ax.xaxis.set_major_locator(plt.MultipleLocator(0.2))
    # Hide redundant tick labels on interior panel edges.
    if panel in (0, 1):
        ax.xaxis.set_major_formatter(plt.NullFormatter())
    else:
        ax.set_xlabel('$t$')
    if panel in (1, 3):
        ax.yaxis.set_major_formatter(plt.NullFormatter())
    else:
        ax.set_ylabel('$w(t)$')

plt.show()
|
fro | m .inverse import RandomInverseModel
from .sciopt import BFGSInverseModel, COBYLAInverseModel
from .nn import NNInverseModel
from .wnn import WeightedNNInverseModel, ESWNNInverseModel
from .cmamodel import CMAESInverseModel
from .jacobian import JacobianIn | verseModel
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import mo | dels, migrations
import datetime
class Migration(migrations.Migration):
    """Create the Order model and attach existing LineItem rows to it."""

    dependencies = [
        ('depot', '0002_lineitem'),
    ]
    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('name', models.CharField(max_length=50)),
                ('address', models.TextField()),
                ('email', models.EmailField(max_length=75)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='lineitem',
            name='order',
            # NOTE(review): the one-off default is a datetime.date, not an
            # Order primary key — presumably a makemigrations prompt answer;
            # confirm existing LineItem rows migrate as intended.
            field=models.ForeignKey(default=datetime.date(2014, 9, 30), to='depot.Order'),
            preserve_default=False,
        ),
    ]
|
# Plotting performance of string_subst_.py scripts
# bar chart of relative comparison with variances as error bars
import numpy as np
import matplotlib.pyplot as plt
# Relative speed-ups (baseline script_2 == 1) with their variances.
performance = [10.3882388499416, 1, 10.3212281215746]
variance = [0.790435196936213, 0, 0.827207394592818]
scripts = ['string_subst_1.py', 'string_subst_2.py', 'string_subst_3.py']

positions = np.arange(len(scripts))
plt.bar(positions, performance, yerr=variance, align='center', alpha=0.5)
plt.xticks(positions, scripts)
plt.axhline(y=1, linestyle='--', color='black')  # baseline reference line
plt.ylim([0, 12])
plt.ylabel('rel. performance gain')
plt.title('String substitution - Speed improvements')
#plt.show()
plt.savefig('PNGs/string_subst_bar.png')
|
from datetime import datetime, timedelta
from django import http
from django.conf import settings
from django.core.exceptions import PermissionDenied
import mock
import pytest
from olympia.amo.tests import BaseTestCase, TestCase
from olympia.amo import decorators, get_user, set_user
from olympia.amo.urlresolvers import reverse
from olympia.users.models import UserProfile
pytestmark = pytest.mark.django_db
def test_post_required():
    """post_required rejects GET with 405 and passes POST through."""
    def view(request):
        return mock.sentinel.response

    wrapped = decorators.post_required(view)
    request = mock.Mock()
    request.method = 'GET'
    assert isinstance(wrapped(request), http.HttpResponseNotAllowed)
    request.method = 'POST'
    assert wrapped(request) == mock.sentinel.response
def test_json_view():
    """Turns a Python object into a response."""
    response = decorators.json_view(lambda request: {'x': 1})(mock.Mock())
    assert isinstance(response, http.HttpResponse)
    assert response.status_code == 200
    assert response['Content-Type'] == 'application/json'
    assert response.content == '{"x": 1}'
def test_json_view_normal_ | response():
"""Normal responses get passed through."""
expected = http.HttpResponseForbidden()
def func(request):
return expected
response = decorators.json_view(func)(mock.Mock())
assert expected is response
assert response['Content-Type'] == 'text/html; charset=utf-8'
def test_json_view_error():
    """json_view.error returns 400 responses."""
    res = decorators.json_view.error({'msg': 'error'})
    assert res['Content-Type'] == 'application/json'
    assert res.content == '{"msg": "error"}'
    assert isinstance(res, http.HttpResponseBadRequest)
def test_json_view_status():
    """A custom status_code is applied to the JSON response."""
    view = decorators.json_view(lambda request: {'x': 1}, status_code=202)
    assert view(mock.Mock()).status_code == 202
def test_json_view_response_status():
    """json_response serializes the payload and applies the status code."""
    res = decorators.json_response({'msg': 'error'}, status_code=202)
    assert res.status_code == 202
    assert res['Content-Type'] == 'application/json'
    assert res.content == '{"msg": "error"}'
class TestTaskUser(TestCase):
    """set_task_user swaps in the task user for the call, then restores."""
    fixtures = ['base/users']

    def test_set_task_user(self):
        @decorators.set_task_user
        def wrapped():
            return get_user()

        set_user(UserProfile.objects.get(username='regularuser'))
        assert get_user().pk == 999
        # Inside the decorated call the task user is active...
        assert wrapped().pk == int(settings.TASK_USER_ID)
        # ...and the previous user is restored afterwards.
        assert get_user().pk == 999
class TestLoginRequired(BaseTestCase):
    """Behaviour of login_required for an unauthenticated request."""

    def setUp(self):
        super(TestLoginRequired, self).setUp()
        self.f = mock.Mock()
        self.f.__name__ = 'function'
        self.request = mock.Mock()
        self.request.user.is_authenticated.return_value = False
        self.request.get_full_path.return_value = 'path'

    def test_normal(self):
        # Anonymous users get a 302 to the login page with ?to=<path>.
        response = decorators.login_required(self.f)(self.request)
        assert not self.f.called
        assert response.status_code == 302
        assert response['Location'] == (
            '%s?to=%s' % (reverse('users.login'), 'path'))

    def test_no_redirect(self):
        response = decorators.login_required(
            self.f, redirect=False)(self.request)
        assert not self.f.called
        assert response.status_code == 401

    def test_decorator_syntax(self):
        # @login_required(redirect=False)
        response = decorators.login_required(
            redirect=False)(self.f)(self.request)
        assert not self.f.called
        assert response.status_code == 401

    def test_no_redirect_success(self):
        wrapped = decorators.login_required(redirect=False)(self.f)
        self.request.user.is_authenticated.return_value = True
        wrapped(self.request)
        assert self.f.called
class TestSetModifiedOn(TestCase):
    """set_modified_on stamps the given objects when the result is truthy."""
    fixtures = ['base/users']

    @decorators.set_modified_on
    def some_method(self, worked):
        return worked

    def test_set_modified_on(self):
        users = list(UserProfile.objects.all()[:3])
        self.some_method(True, set_modified_on=users)
        today = datetime.today().date()
        for user in users:
            # Each object's 'modified' date is bumped to today.
            assert (UserProfile.objects.get(pk=user.pk).modified.date()
                    == today)

    def test_not_set_modified_on(self):
        yesterday = datetime.today() - timedelta(days=1)
        qs = UserProfile.objects.all()
        qs.update(modified=yesterday)
        users = list(qs[:3])
        self.some_method(False, set_modified_on=users)
        for user in users:
            # A falsy result leaves the timestamps untouched.
            assert (UserProfile.objects.get(pk=user.pk).modified.date()
                    < datetime.today().date())
class TestPermissionRequired(TestCase):
    """permission_required delegates the decision to acl.action_allowed."""

    def setUp(self):
        super(TestPermissionRequired, self).setUp()
        self.f = mock.Mock()
        self.f.__name__ = 'function'
        self.request = mock.Mock()

    @mock.patch('olympia.access.acl.action_allowed')
    def test_permission_not_allowed(self, action_allowed):
        # A denied permission surfaces as PermissionDenied.
        action_allowed.return_value = False
        wrapped = decorators.permission_required('', '')(self.f)
        with self.assertRaises(PermissionDenied):
            wrapped(self.request)

    @mock.patch('olympia.access.acl.action_allowed')
    def test_permission_allowed(self, action_allowed):
        action_allowed.return_value = True
        decorators.permission_required('', '')(self.f)(self.request)
        assert self.f.called

    @mock.patch('olympia.access.acl.action_allowed')
    def test_permission_allowed_correctly(self, action_allowed):
        # The app/action pair is forwarded verbatim to the ACL check.
        decorators.permission_required('Admin', '%')(self.f)(self.request)
        action_allowed.assert_called_with(self.request, 'Admin', '%')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.