prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
import collections
class Command:
def __init__(self, template):
self._values = template
def __getite | m__(self, key):
value = self._values[key]
if isinstance(value, dict) or isinstance(value, list):
return Command(value)
return self._values[key]
def __setitem__(self, key, value):
try:
self._values[key] = type(self._values[key])(value)
except ValueError:
raise CommandSetValueError
class CommandSetValueError(Exc | eption):
pass
|
from django.conf import settings
from gnip_search.gnip_search_api import GnipSearchAPI
from gnip_search.gnip_search_api import QueryError as GNIPQueryError
class Frequency:
"""
Class collection for Frequency
"""
DATE_FORMAT = "%Y-%m-%d %H:%M"
def __init__(self, query, sample, start, end):
self.query = query
self.sample = sample
self.start = start
self.end = end
self.freq = self.get(self.get_data())
def get_data(self):
"""
Returns data for frequency in list view
"""
# New gnip client with fresh endpoint (this one sets to counts.json)
g = GnipSearchAPI(settings.GNIP_USERNAME,
settings.GNIP_PASSWORD,
settings.GNIP_SEARCH_ENDPOINT,
paged=True)
timeline = None
try:
timeline = g.query_api(
self.query, self.sample, use_case="wordcount", start=self.start.strftime(
self.DATE_FORMAT), end=self.end.strftime(
self.DATE_FORMAT), csv_flag=False)
except GNIPQueryError as e:
print(e)
result = g.freq.get_tokens(20)
| return result
def get(self, data):
| response_data = []
for f in data:
response_data.append(f)
response_data = sorted(response_data, key=lambda f: -f[3])
return response_data
|
from dive_operator import DiveOperator
from dive_python_operator import DivePy | thonOperator
__all__ = ['DiveOperator | ', 'DivePythonOperator']
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
from bottle import route, get, post, view, run, app, default_app, static_file
from palpite import palpite
@get('/')
@view('index.html')
def i | ndex_get():
return dict()
@post('/')
@view('index.html')
def index_post():
gera_palpite = palpite()
return dict(jogo=gera_palpite)
@get('/favicon.ico')
@get('/favicon.png')
def favicon():
return static_file('/static/favicon.png', root='.')
@get('/normalize.css')
def no | rmalizecss():
return static_file('normalize.css', root='static')
@get('/skeleton.css')
def skeletoncss():
arq = 'skeleton.css'
return static_file(arq, root='./static')
app = default_app()
app.run(server='gae',debug=True)
|
"""
ONE-TIME UPDATE OF converting SubredditPage.created_at, FrontPage.created_at to u | tc
Daylight Savings Time = Nov 6 2am
if we see the ET time Nov 6 12am, then it is EDT: EDT-->UTC = +4 = Nov 6 4am
if we see the ET time Nov 6 1-2am, then it is unclear whether it is EDT or EST; assume it is EST
> assumption is becaues I don't think we really care about this, as long as we are consistent
if we see the ET ti | me Nov 6 2:30am, then it is EST: EST-->UTC = +5 = Nov 6 7:30am
if we see the ET time Nov 6 3am, then it is EST: EST-->UTC = +5 = Nov 6 8am
"""
import re, random, string, sys, math, os, datetime
BASE_DIR = os.path.join(os.path.dirname(os.path.realpath(__file__)), "../", "../")
sys.path.append(BASE_DIR)
import simplejson as json
ENV = os.environ['CS_ENV']
with open(os.path.join(BASE_DIR, "config") + "/{env}.json".format(env=ENV), "r") as config:
DBCONFIG = json.loads(config.read())
### LOAD SQLALCHEMY SESSION
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from app.models import Base, SubredditPage, FrontPage
db_engine = create_engine("mysql://{user}:{password}@{host}/{database}".format(
host = DBCONFIG['host'],
user = DBCONFIG['user'],
password = DBCONFIG['password'],
database = DBCONFIG['database']))
Base.metadata.bind = db_engine
DBSession = sessionmaker(bind=db_engine)
db_session = DBSession()
#################
DST_LIMIT_ET = datetime.datetime(2016, 11, 6, 1, 00) # year, month, day, hour, second
EDT_TO_UTC = datetime.timedelta(hours=4) # +4 hours; EDT = in DST; before Nov 6 1am
EST_TO_UTC = datetime.timedelta(hours=5) # +5 hours; EST = not in DST; after Nov 6 1am
for model in [SubredditPage, FrontPage]:
posts = db_session.query(model)
total_posts = posts.count()
num_updated_posts = 0
last_et_time_utc = datetime.datetime.min
last_edt_time = datetime.datetime.min
num_confusing_et_times = 0
print("Testing {0} posts...".format(total_posts))
for post in posts.all():
if not post.is_utc:
created_at_et = post.created_at
if created_at_et < DST_LIMIT_ET:
# is EDT; in DST; before Nov 6 2am = Daylight Savings Time
created_at_utc = created_at_et - EDT_TO_UTC
last_edt_time = max([last_edt_time, created_at_et])
else:
# is EST; out of DST
if created_at_et < DST_LIMIT_ET + datetime.timedelta(hours=1):
# if between 1am and 2am on Nov 6
num_confusing_et_times += 1
created_at_utc = created_at_et - EST_TO_UTC
post.created_at = created_at_utc
post.is_utc = True
num_updated_posts += 1
last_et_time_utc = max([last_et_time_utc, created_at_utc])
print("Updating created_at for {0} posts; updated created_at to UTC up to time {1}; DST found up to time {2}; num_confusing_et_times: {3}".format(num_updated_posts, last_et_time_utc, last_edt_time, num_confusing_et_times))
db_session.commit()
|
"""Contains a baseclass for plugins."""
###############################################################################
#
# TODO: [ ]
#
###############################################################################
# standard library imports
from collections import Iterable
from functools import wraps
import logging
# related third party imports
# application specific imports
from samantha.core import subscribe_to
__version__ = "1.4.10"
# Initialize the logger
LOGGER = logging.getLogger(__name__)
class Plugin(object):
"""Baseclass, that holds the mandatory methods a plugin must support."""
def __init__(self, name="Plugin", active=False,
logger=None, file_path=None, plugin_type="s"):
"""Set the plugin's attributes, if they're not set already."""
self.name = name
self.uid = "NO_UID"
self.is_active = active
if logger:
self.logger = logger
else:
self.logger = LOGGER
if file_path:
self.path = file_path
else:
self.path = __file__
self.plugin_type = plugin_type
plugin_type_long = "device" if self.plugin_type is "d" else "plugin"
self.logger.info("Initialisation of the %s '%s' complete. "
"The %s is %sactive.",
plugin_type_long,
self.name,
plugin_type_long,
"" if self.is_active else "not ")
def __str__(self):
"""Return a simple string representation of the plugin."""
return "{} '{}', UID {}".format(
("Device" if self.plugin_type == "d" else "Plugin"),
self.name,
self.uid)
def __repr__(self):
"""Return a verbose string representation of the plugin."""
return "{type}\t{name:10}\tUID {uid}\tLoaded from {path}".format(
type=("Device" if self.plugin_type == "d" else "Plugin"),
name=self.name,
uid=self.uid,
path=self.path)
class Device(Plugin):
"""Baseclass, that holds the mandatory methods a device must support."""
def __init__(self, name="Device", active=False,
logger=None, file_path=None, group=None):
"""Set the plugin's attributes, if they're not set already."""
self.name = name
self.is_available = None
self.group = group
| self.power_on_keywords = ["turn.on." + self.name.lower()]
self.power_off_keywords = ["turn.off." + self.name.lower()]
if group:
if not isinstance(group, str) and isinstance(group, Iterable):
top_level = []
sub_level = []
words = []
for key in group:
if key[-1] == ".":
sub_level.append(key)
else:
| top_level.append(key)
words.append(key)
for sub in sub_level:
for top in top_level:
words.append(sub + top)
for word in words:
self.power_on_keywords.append("turn.on." + word.lower())
self.power_off_keywords.append("turn.off." + word.lower())
else:
self.power_on_keywords.append("turn.on." + group.lower())
self.power_off_keywords.append("turn.off." + group.lower())
# self.logger.info("Initialisation complete")
super(Device, self).__init__(name, active, logger, file_path, "d")
def turn_on(self, func):
@subscribe_to(self.power_on_keywords)
@wraps(func)
def function(*args, **kwargs):
return func(*args, **kwargs)
return function
def turn_off(self, func):
@subscribe_to(self.power_off_keywords)
@wraps(func)
def function(*args, **kwargs):
return func(*args, **kwargs)
return function
|
class A:
def test(self):
print "I##|nitializing A", "test"##|
attribute = "hello"
| def my_method(self):
print self.attribute
a = A()
a.test()
##r Should expand to Full String "Initializing | A"
# Invalid selection:
# nitializing A", "test" |
.json and task_run.json from MoorFrog
Output is a set of bounding boxes, well pad points,
and pond clicks.
""".format(__docname__, '-' * len(__docname__)))
return 1
#/* ======================================================================= */#
#/* Define print_help_info() function
#/* ======================================================================= */#
def print_help_info():
"""
Print a list of help related flags
:return: 1 for exit code purposes
:rtype: int
"""
print("""
Help flags:
--help -> More detailed description of this utility
--usage -> Arguments, parameters, flags, options, etc.
--version -> Version and ownership information
--license -> License information
""")
return 1
#/* ======================================================================= */#
#/* Define print_version() function
#/* ======================================================================= */#
def print_version():
"""
Print script version information
:return: 1 for exit code purposes
:rtype: int
"""
print("""
%s version %s - released %s
""" % (__docname__, __version__, __release__))
return 1
#/* ======================================================================= */#
#/* Define create_bboxes() function
#/* ======================================================================= */#
def create_bboxes(tasks, layer):
"""
Add bounding boxes to input layer
:param tasks: tasks from json.load(open('task.json'))
| :type tasks: list
:param layer: OGR layer object
:type layer: <ogr.Layer class>
:return: True on success and False on failure
:rtype: bool
"""
| # Update user
print("Creating bounding boxes")
# Define fields
print(" Defining bbox fields...")
fields_definitions = (('id', 10, ogr.OFTInteger),
('site_id', 254, ogr.OFTString),
('location', 254, ogr.OFTString),
('wms_url', 254, ogr.OFTString),
('county', 254, ogr.OFTString),
('year', 10, ogr.OFTInteger),
('qaqc', 254, ogr.OFTString))
# Create fields
for field_name, field_width, field_type in fields_definitions:
print(" " + field_name)
field_object = ogr.FieldDefn(field_name, field_type)
field_object.SetWidth(field_width)
layer.CreateField(field_object)
# Loop through tasks and create features
num_tasks = len(tasks)
i = 0
print(" Processing %s tasks..." % str(len(tasks)))
for task in tasks:
# Update user
i += 1
sys.stdout.write("\r\x1b[K" + " %s/%s" % (str(i), str(num_tasks)))
sys.stdout.flush()
# Get field content
location = str(task['info']['latitude']) + str(task['info']['longitude']) + '---' + str(task['info']['year'])
field_values = {'id': int(task['id']),
'site_id': str(task['info']['SiteID']),
'location': str(location),
'wms_url': str(task['info']['url']),
'county': str(task['info']['county']),
'year': int(task['info']['year'])}
# Get corner coordinates and assemble into a geometry
coordinates = task['info']['bbox'].split(',')
x_min = float(coordinates[2])
x_max = float(coordinates[0])
y_min = float(coordinates[1])
y_max = float(coordinates[3])
ring = ogr.Geometry(ogr.wkbLinearRing)
ring.AddPoint(x_min, y_max)
ring.AddPoint(x_min, y_min)
ring.AddPoint(x_max, y_min)
ring.AddPoint(x_max, y_max)
ring.CloseRings()
# Create a new feature and assign geometry and field values
rectangle = ogr.Geometry(ogr.wkbPolygon)
rectangle.AddGeometry(ring)
feature = ogr.Feature(layer.GetLayerDefn())
feature.SetGeometry(rectangle)
for field, value in field_values.iteritems():
feature.SetField(field, value)
layer.CreateFeature(feature)
rectangle = None
feature = None
# Update user
print(" - Done")
return True
#/* ======================================================================= */#
#/* Define create_clicks() function
#/* ======================================================================= */#
def create_clicks(tasks, task_runs, layer):
"""
Add click points to layer
:param tasks: tasks from json.load(open('task.json'))
:type tasks: list
:param task_runs: tasks from json.load(open('task_run.json'))
:type task_runs: list
:param layer: OGR layer object
:type layer: <ogr.Layer class>
:return: True on success and False on failure
:rtype: bool
"""
# Update user
print("Creating clicks")
# Define fields
print(" Defining click fields...")
fields_definitions = (('id', 10, ogr.OFTInteger),
('task_id', 10, ogr.OFTInteger),
('year', 10, ogr.OFTInteger),
('qaqc', 254, ogr.OFTString))
# Create fields
for field_name, field_width, field_type in fields_definitions:
print(" " + field_name)
field_object = ogr.FieldDefn(field_name, field_type)
field_object.SetWidth(field_width)
layer.CreateField(field_object)
# Loop through tasks and create features
print(" Processing %s tasks..." % str(len(task_runs)))
i = 0
num_task_runs = len(task_runs)
for task_run in task_runs:
# Update user
i += 1
sys.stdout.write("\r\x1b[K" + " %s/%s" % (str(i), str(num_task_runs)))
sys.stdout.flush()
# Get field content
field_values = {'id': int(task_run['id']),
'task_id': int(task_run['task_id'])}
# Get year
for t in tasks:
if t['id'] == task_run['task_id']:
field_values['year'] = int(t['info']['year'])
break
# Get list of clicks
clicks = task_run['info']['positions']
for click in clicks:
feature = ogr.Feature(layer.GetLayerDefn())
# Set field attributes and geometry
point = ogr.CreateGeometryFromWkt("POINT(%f %f)" % (float(click['lon']), float(click['lat'])))
feature.SetGeometry(point)
for field, value in field_values.iteritems():
feature.SetField(field, value)
layer.CreateFeature(feature)
feature = None
# Update user
print(" Done")
return True
#/* ======================================================================= */#
#/* Define get_crowd_selection() function
#/* ======================================================================= */#
def create_wellpads(tasks, layer):
"""
Add click points to layer
:param tasks: tasks from json.load(open('task.json'))
:type tasks: list
:param layer: OGR layer object
:type layer: <ogr.Layer class>
:return: True on success and False on failure
:rtype: bool
"""
# Update user
print("Creating wellpads")
# Define fields
print(" Defining layer fields...")
fields_definitions = (('id', 10, ogr.OFTInteger),
('site_id', 254, ogr.OFTString),
('location', 254, ogr.OFTString),
('wms_url', 254, ogr.OFTString),
('county', 254, ogr.OFTString),
('year', 10, ogr.OFTInteger),
('qaqc', 254, ogr.OFTString))
# Create fields
for field_name, field_width, field_type in fields_definitions:
print(" " + field_name)
field_object = ogr.FieldDefn(field_name, field_type)
field_object.SetWidth(field_width)
layer.CreateField(field_object)
# Loop through tasks and create features
print(" Processing %s tasks..." % str(len(tasks)))
i = 0
num_tasks = len(tasks)
for task in tasks:
# Update user
|
"""Module defining ``Eigensolver`` classes."""
import numpy as np
from gpaw.utilities.blas import axpy
from gpaw.eigensolvers.eigensolver import Eigensolver
from gpaw import extra_parameters
class RMM_DIIS(Eigensolver):
"""RMM-DIIS eigensolver
It is expected that the trial wave functions are orthonormal
and the integrals of projector functions and wave functions
``nucleus.P_uni`` are already calculated
Solution steps are:
* Subspace diagonalization
* Calculation of residuals
* Improvement of wave functions: psi' = psi + lambda PR + lambda PR'
* Orthonormalization"""
def __init__(self, keep_htpsit=True, blocksize=10,
fixed_trial_step=None):
self.fixed_trial_step = fixed_trial_step
Eigensolver.__init__(self, keep_htpsit, blocksize)
def iterate_one_k_point(self, hamiltonian, wfs, kpt):
"""Do a single RMM-DIIS iteration for the kpoint"""
psit_nG, R_nG = self.subspace_diagonalize(hamiltonian, wfs, kpt)
self.timer.start('RMM-DIIS')
if self.keep_htpsit:
self.calculate_residuals(kpt, wfs, hamiltonian, psit_nG,
kpt.P_ani, kpt.eps_n, R_nG)
def integrate(a_G, b_G):
return np.real(wfs.integrate(a_G, b_G, global_integral=False))
comm = wfs.gd.comm
B = self.blocksize
dR_xG = wfs.empty(B, q=kpt.q)
P_axi = wfs.pt.dict(B)
error = 0.0
for n1 in range(0, wfs.bd.mynbands, B):
n2 = n1 + B
if n2 > wfs.bd.mynbands:
n2 = wfs.bd.mynbands
B = n2 - n1
P_axi = dict((a, P_xi[:B]) for a, P_xi in P_axi.items())
dR_xG = dR_xG[:B]
n_x = range(n1, n2)
psit_xG = psit_nG[n1:n2]
if self.keep_htpsit:
R_xG = R_nG[n1:n2]
else:
R_xG = wfs.empty(B, q=kpt.q)
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, psit_xG, R_xG)
wfs.pt.integrate(psit_xG, P_axi, kpt.q)
self.calculate_residuals(kpt, wfs, hamiltonian, psit_xG,
P_axi, kpt.eps_n[n_x], R_xG, n_x)
for n in n_x:
if kpt.f_n is None:
| weight = kpt.weight
else:
weight = kpt.f_n[n]
if self.nbands_converge != 'occupied':
if wfs.bd.global_index(n) < self.nbands_converge:
weight = kpt.weight
else:
| weight = 0.0
error += weight * integrate(R_xG[n - n1], R_xG[n - n1])
# Precondition the residual:
self.timer.start('precondition')
ekin_x = self.preconditioner.calculate_kinetic_energy(
psit_xG, kpt)
dpsit_xG = self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
# Calculate the residual of dpsit_G, dR_G = (H - e S) dpsit_G:
wfs.apply_pseudo_hamiltonian(kpt, hamiltonian, dpsit_xG, dR_xG)
self.timer.start('projections')
wfs.pt.integrate(dpsit_xG, P_axi, kpt.q)
self.timer.stop('projections')
self.calculate_residuals(kpt, wfs, hamiltonian, dpsit_xG,
P_axi, kpt.eps_n[n_x], dR_xG, n_x,
calculate_change=True)
# Find lam that minimizes the norm of R'_G = R_G + lam dR_G
RdR_x = np.array([integrate(dR_G, R_G)
for R_G, dR_G in zip(R_xG, dR_xG)])
dRdR_x = np.array([integrate(dR_G, dR_G) for dR_G in dR_xG])
comm.sum(RdR_x)
comm.sum(dRdR_x)
lam_x = -RdR_x / dRdR_x
if extra_parameters.get('PK', False):
lam_x[:] = np.where(lam_x>0.0, lam_x, 0.2)
# Calculate new psi'_G = psi_G + lam pR_G + lam2 pR'_G
# = psi_G + p((lam+lam2) R_G + lam*lam2 dR_G)
for lam, R_G, dR_G in zip(lam_x, R_xG, dR_xG):
if self.fixed_trial_step is None:
lam2 = lam
else:
lam2 = self.fixed_trial_step
R_G *= lam + lam2
axpy(lam * lam2, dR_G, R_G)
self.timer.start('precondition')
psit_xG[:] += self.preconditioner(R_xG, kpt, ekin_x)
self.timer.stop('precondition')
self.timer.stop('RMM-DIIS')
error = comm.sum(error)
return error, psit_nG
|
"""empty message
Revision ID: 2728b7328b78
Revises: d7c7f3be40a
Create Date: 2015-10-20 13:44:12.129389
"""
# revision ide | ntifiers, used by Alembic.
revision = '2728b7328b78'
down_revision = 'd7c7f3be40a'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generat | ed by Alembic - please adjust! ###
op.drop_table('conference_schedule')
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('conference_schedule',
sa.Column('id', sa.INTEGER(), nullable=False),
sa.Column('entry', sa.VARCHAR(length=256), nullable=True),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
|
project: str = None,
service_account_file: str = "",
):
channel.initialize()
self.name = name
self.display_name = display_name
self.allow_password_signup = allow_password_signup
self.enable_email_link_signin = enable_email_link_signin
self.disable_auth = disable_auth
self.enable_anonymous_user = enable_anonymous_user
self.mfa_config = mfa_config
self.test_phone_numbers = test_phone_numbers
self.project = project
self.service_account_file = service_account_file
def apply(self):
stub = tenant_pb2_grpc.IdentitytoolkitAlphaTenantServiceStub(channel.Channel())
request = tenant_pb2.ApplyIdentitytoolkitAlphaTenantRequest()
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.allow_password_signup):
request.resource.allow_password_signup = Primitive.to_proto(
self.allow_password_signup
)
if Primitive.to_proto(self.enable_email_link_signin):
request.resource.enable_email_link_signin = Primitive.to_proto(
self.enable_email_link_signin
)
if Primitive.to_proto(self.disable_auth):
request.resource.disable_auth = Primitive.to_proto(self.disable_auth)
if Primitive.to_proto(self.enable_anonymous_user):
request.resource.enable_anonymous_user = Primitive.to_proto(
self.enable_anonymous_user
)
if TenantMfaConfig.to_proto(self.mfa_config):
request.resource.mfa_config.CopyFrom(
TenantMfaConfig.to_proto(self.mfa_config)
)
else:
request.resource.ClearField("mfa_config")
if Primitive.to_proto(self.test_phone_numbers):
request.resource.test_phone_numbers = Primitive.to_proto(
self.test_phone_numbers
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
request.service_account_file = self.service_account_file
response = stub.ApplyIdentitytoolkitAlphaTenant(request)
self.name = Primitive.from_proto(response.name)
self.display_name = Primitive.from_proto(response.display_name)
self.allow_password_signup = Primitive.from_proto(
response.allow_password_signup
)
self.enable_email_link_signin = Primitive.from_proto(
response.enable_ | email_link_signin
)
self.disable_auth = Primitive.from_proto(response.disable_auth)
self.enable_anonymous_user = Primitive.from_proto(
response.enable_anonymous_user
)
self.mfa_config = TenantMfaConfig.from_proto(response.mfa_config)
self.test_phone_numbers = Primitive.from_proto(response.test_phone_numbers)
self.proje | ct = Primitive.from_proto(response.project)
def delete(self):
stub = tenant_pb2_grpc.IdentitytoolkitAlphaTenantServiceStub(channel.Channel())
request = tenant_pb2.DeleteIdentitytoolkitAlphaTenantRequest()
request.service_account_file = self.service_account_file
if Primitive.to_proto(self.name):
request.resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
request.resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.allow_password_signup):
request.resource.allow_password_signup = Primitive.to_proto(
self.allow_password_signup
)
if Primitive.to_proto(self.enable_email_link_signin):
request.resource.enable_email_link_signin = Primitive.to_proto(
self.enable_email_link_signin
)
if Primitive.to_proto(self.disable_auth):
request.resource.disable_auth = Primitive.to_proto(self.disable_auth)
if Primitive.to_proto(self.enable_anonymous_user):
request.resource.enable_anonymous_user = Primitive.to_proto(
self.enable_anonymous_user
)
if TenantMfaConfig.to_proto(self.mfa_config):
request.resource.mfa_config.CopyFrom(
TenantMfaConfig.to_proto(self.mfa_config)
)
else:
request.resource.ClearField("mfa_config")
if Primitive.to_proto(self.test_phone_numbers):
request.resource.test_phone_numbers = Primitive.to_proto(
self.test_phone_numbers
)
if Primitive.to_proto(self.project):
request.resource.project = Primitive.to_proto(self.project)
response = stub.DeleteIdentitytoolkitAlphaTenant(request)
@classmethod
def list(self, project, service_account_file=""):
stub = tenant_pb2_grpc.IdentitytoolkitAlphaTenantServiceStub(channel.Channel())
request = tenant_pb2.ListIdentitytoolkitAlphaTenantRequest()
request.service_account_file = service_account_file
request.Project = project
return stub.ListIdentitytoolkitAlphaTenant(request).items
def to_proto(self):
resource = tenant_pb2.IdentitytoolkitAlphaTenant()
if Primitive.to_proto(self.name):
resource.name = Primitive.to_proto(self.name)
if Primitive.to_proto(self.display_name):
resource.display_name = Primitive.to_proto(self.display_name)
if Primitive.to_proto(self.allow_password_signup):
resource.allow_password_signup = Primitive.to_proto(
self.allow_password_signup
)
if Primitive.to_proto(self.enable_email_link_signin):
resource.enable_email_link_signin = Primitive.to_proto(
self.enable_email_link_signin
)
if Primitive.to_proto(self.disable_auth):
resource.disable_auth = Primitive.to_proto(self.disable_auth)
if Primitive.to_proto(self.enable_anonymous_user):
resource.enable_anonymous_user = Primitive.to_proto(
self.enable_anonymous_user
)
if TenantMfaConfig.to_proto(self.mfa_config):
resource.mfa_config.CopyFrom(TenantMfaConfig.to_proto(self.mfa_config))
else:
resource.ClearField("mfa_config")
if Primitive.to_proto(self.test_phone_numbers):
resource.test_phone_numbers = Primitive.to_proto(self.test_phone_numbers)
if Primitive.to_proto(self.project):
resource.project = Primitive.to_proto(self.project)
return resource
class TenantMfaConfig(object):
def __init__(self, state: str = None, enabled_providers: list = None):
self.state = state
self.enabled_providers = enabled_providers
@classmethod
def to_proto(self, resource):
if not resource:
return None
res = tenant_pb2.IdentitytoolkitAlphaTenantMfaConfig()
if TenantMfaConfigStateEnum.to_proto(resource.state):
res.state = TenantMfaConfigStateEnum.to_proto(resource.state)
if TenantMfaConfigEnabledProvidersEnumArray.to_proto(
resource.enabled_providers
):
res.enabled_providers.extend(
TenantMfaConfigEnabledProvidersEnumArray.to_proto(
resource.enabled_providers
)
)
return res
@classmethod
def from_proto(self, resource):
if not resource:
return None
return TenantMfaConfig(
state=TenantMfaConfigStateEnum.from_proto(resource.state),
enabled_providers=TenantMfaConfigEnabledProvidersEnumArray.from_proto(
resource.enabled_providers
),
)
class TenantMfaConfigArray(object):
@classmethod
def to_proto(self, resources):
if not resources:
return resources
return [TenantMfaConfig.to_proto(i) for i in resources]
@cla |
# -*- coding: utf-8 -*-
"""The application's model objects"""
from zope.sqlalchemy import ZopeTransactionExtension
from sqlalchemy.orm import scoped_session, sessionmaker
# from sqlalchemy import MetaData
from sqlalchemy.ext.declarative import declarative_base
# Global session manager: DBSession() returns the Thread-local
# session object appropriate for the current web request.
#===============================================================================
# test by cl
#===============================================================================
import sqlalchemy
from datetime import date, datetime as dt
from sqlalchemy.orm.session import SessionExtension
from sqlalchemy.orm import attributes, object_mapper
DB_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
class LogSessionExtension(SessionExtension):
def before_flush(self, session, flush_context, instances):
print "_^" * 30
print "Come into my log session extension"
print "_*" * 30
log = []
for obj in session.dirty:
obj_mapper = object_mapper(obj)
obj_state = attributes.instance_state(obj)
for om in obj_mapper.iterate_to_root():
for obj_col in om.local_table.c:
try:
prop = obj_mapper.get_property_by_column(obj_col)
except UnmappedColumnError:
continue
try:
need2log = obj_col.info["auto_log"]
except:
continue
else:
if not need2log : continue
if prop.key not in obj_state.dict:
getattr(obj, prop.key)
history = attributes.get_history(obj, prop.key)
if not history.has_changes():continue
a, u, d = history
if d:
attr_old_value = d[0]
elif u:
attr_old_value = u[0]
else:
attr_old_value = ""
attr_new_value = a[0] or ""
if not self._isUpdateReally(obj_col, attr_old_value, attr_new_value) : continue
_old, _new = self._2string(obj_col, attr_old_value, attr_new_value)
log.append((obj_col.info.get("field_name", prop.key), _old, _new))
if log :
print log
def _isUpdateReally(self, col, old_value, new_value):
if not old_value and not new_value : return False
if not (old_value and new_value) : return True
if isinstance(col.type, sqlalchemy.types.Integer): return old_value == int(new_value)
if isinstance(col.type, sqlalchemy.types.Float): return old_value == float(new_value)
if isinstance(col.type, (sqlalchemy.types.Unicode, sqlalchemy.types.String)): return unicode(old_value) == unicode(new_value)
if isinstance(col.type, (sqlalchemy.types.Date, sqlalchemy.types.DateTime)) : return old_value == dt.strptime(new_value, DB_DATE_FORMAT)
# if isinstance(prop.type, sqlalchemy.types.Boolean) : return old_value == bool(new_value)
return False
def _2string(self, col, old_value, new_value):
if isinstance(col.type, sqlalchemy.types.Integer): return (old_value or '', new_value or '')
if isinstance(col.type, sqlalchemy.types.Float): return (old_value or '', new_value or '')
if isinstance(col.type, (sqlalchemy.types.Unicode, sqlalchemy.types.String)): return (old_value or "", new_value or "")
if isinstance(col.type, (sqlalchemy.types.Date, sqlalchemy.types.DateTime)) :
_o = "" if not old_value else old_value.strftime(DB_DATE_FORMAT)
_n = new_value or ""
return (_o, _n)
return (old_value, new_value)
# maker = sessionmaker(autoflush = True, autocommit = False,
# extension = [ LogSessionExtension(), ZopeTransactionExtension(), ])
maker = sessionmaker(autoflush = True, autocommit = False,
extension = ZopeTransactionExtension())
DBSession = scoped_session(maker)
# Base class for all of our model classes: By default, the data model is
# defined with SQLAlchemy's declarative extension, but if you need more
# control, you can switch to the traditional method.
DeclarativeBase = declarative_base()
# There are two convenient ways for you to spare some typing.
# You can have a query property on all your model classes by doing this:
# DeclarativeBase.query = DBSession.query_property()
# Or you can use a session-aware mapper as it was used in TurboGears 1:
# DeclarativeBase = declarative_base(mapper=DBSession.mapper)
# Global metadata.
# The default metadata is the one from the declarative base.
metadata = DeclarativeBase.metadata
# If you have multiple databases with overlapping table names, you'll need a
# metadata for each database. Feel free to rename 'metadata2'.
# metadata2 = MetaData()
#####
# Generally you will not want to define your table's mappers, and data objects
# here in __init__ but will want to create modules them in the model directory
# and import them at the bottom of this file.
#
######
def init_model(engine):
"""Call me before using any of the tables or classes in the model."""
engine.dialect.supports_sane_rowcount = False
DBSession.configure(bind = engine)
# If you are using reflection to introspect your database and create
# table objects for you, your tables must be defined and mapped inside
# the init_model function, so that the engine is available if you
# use the model outsi | de tg2, you need to make sure this is called before
# you use the model.
#
# See the following example:
# global t_reflected
# t_reflected = Table("Reflected", metadata,
# autoload=True, autoload_with=engine)
# mapper(Reflected, t_reflected)
# Import your model modules here.
from tribal.model.auth import User, Group, Permission
from tribal.model.sportsware import *
from tribal.model.orsay import *
from tribal.model.orchestra import *
from tribal.model.sampl | e import *
# from tribal.model.pei import *
from tribal.model.sysutil import *
from tribal.model.dba import *
from tribal.model.bby import *
from tribal.model.tag import *
# from tribal.model.cabelas import *
from tribal.model.lemmi import *
from tribal.model.tmw import *
from tribal.model.mglobalpack import *
from tribal.model.prepress import *
|
import os
import flask
import memdam.blobstore.localfolder
import memdam.eventstore.sqlite
from memdam.server.web import app
def get_archive(username):
| """
:param username: the name of the user for which we should get the event archive
:type username: string
:returns: a new (or cached) archive
:rtype: memdam.eventstore.api.Eventstore
"""
db_file = app.config['DATABASE_FOLDER']
if db_file == ':memory:':
| return flask.g._archives[username]
assert db_file != ''
db_file = os.path.join(db_file, username)
if not os.path.exists(db_file):
os.makedirs(db_file)
archive = memdam.eventstore.sqlite.Eventstore(db_file)
return archive
def get_blobstore(username):
    """
    :param username: the name of the user for which we should get the blobstore folder.
    :type username: string
    :returns: a new (or cached) blobstore
    :rtype: memdam.blobstore.api.Blobstore
    """
    # Per-user blob folder under the configured root, created on demand.
    folder = os.path.join(app.config['BLOBSTORE_FOLDER'], username)
    if not os.path.exists(folder):
        os.makedirs(folder)
    return memdam.blobstore.localfolder.Blobstore(folder)
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from nipype.testing import assert_equal
from nipype.interfaces.semtools.utilities.brains import BRAINSInitializedControlPoints
def test_BRAINSInitializedControlPoints_inputs():
    # Expected trait metadata for every declared input of
    # BRAINSInitializedControlPoints; each metakey is compared against the
    # live input_spec below.
    input_map = dict(args=dict(argstr='%s',
    ),
    environ=dict(nohash=True,
    usedefault=True,
    ),
    ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    inputVolume=dict(argstr='--inputVolume %s',
    ),
    numberOfThreads=dict(argstr='--numberOfThreads %d',
    ),
    outputLandmarksFile=dict(argstr='--outputLandmarksFile %s',
    ),
    outputVolume=dict(argstr='--outputVolume %s',
    hash_files=False,
    ),
    permuteOrder=dict(argstr='--permuteOrder %s',
    sep=',',
    ),
    splineGridSize=dict(argstr='--splineGridSize %s',
    sep=',',
    ),
    terminal_output=dict(nohash=True,
    ),
    )
    inputs = BRAINSInitializedControlPoints.input_spec()
    # Generator-style (nose) test: each yield is one assertion.
    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
def test_BRAINSInitializedControlPoints_outputs():
    # Expected trait metadata for each declared output.
    output_map = dict(outputVolume=dict(),
    )
    outputs = BRAINSInitializedControlPoints.output_spec()
    # Generator-style (nose) test: each yield is one assertion.
    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
|
#! /usr/bin/python
# Copyright (c) 2015, Matthew P. Grosvenor
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the project, the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os
import numpy
if len(sys.argv) < 3:
print "Usage: do_hist <input_file> <bins>"
sys.exit(-1)
bins_count = int(sys.argv[2])
nums = []
parse_errors = 0
range_errors = 0
for line in open(sys.argv[1]):
try:
num = float(line)
except:
parse_errors += 1
continue
if num < 0 or num > 1000000000:
range_errors += 1
continue
nums.append(num)
print "Parse errors found: %i" % (parse_errors)
print "Range errors found: %i" % (range_errors | )
if l | en(nums) < 100:
print "Fatal Error! Not enough points (%i)" % len(nums)
sys.exit(-1)
nums.sort()
#min = numpy.min(nums)
#max = numpy.max(nums)
#print min,max
(ys,xs) = numpy.histogram(nums,bins=bins_count, range=[min(nums),max(nums)])
percent = [0,1,10,25,50,75,90,99,100]
print "Total samples %i" % len(nums)
for p in percent:
print "%3i%% - %fms" % (p, numpy.percentile(nums,p * 1.0))
#print "%i" % (numpy.percentile(nums,p * 1.0))
print "Range = %fms" % (numpy.max(nums) - numpy.min(nums))
out = open("hist_out.ssv","w")
for i in range(0,len(ys)):
out.write("%i %f\n" % (xs[i],ys[i] * 100.0 /len(nums)))
|
# Experimental driver for the `linker` package: open COFF objects, import
# libraries and PE executables, stub unresolved symbols, relocate, and dump
# the resulting segments.
import psyco
psyco.full()
import linker.coff
from linker import store
m,n='python26.dll','Py_DecRef'
localname = None,'__imp__<%s!%s>'%(m,n)
if True:
    # this should import from python26.lib,Py_DecRef
    # this should export ia32.obj,stuff
    a = linker.coff.object.open('~/work/syringe/src/ia32.obj')
    # imports None,Py_DecRef
    # this should import from python26.dll,Py_DecRef
    # this should export Py_DecRef
    b = linker.coff.library.open('~/python26/libs/python26.lib')
    # imports python26.dll,Py_DecRef
    # exports None,Py_DecRef
    # this should import from whatever
    # and export whatever
    c = linker.coff.executable.open('~/../../windows/syswow64/python26.dll')
    # expots python26.dll,Py_DecRef
    d = linker.coff.executable.open('~/../../windows/syswow64/msvcr100.dll')
# raise NotImplementedError("symbol consolidation isn't working")
if True:
    # Relocate the import library `b` at a fixed base, stub every undefined
    # symbol with a recognizable marker value, and dump all segments.
    z = b
    z[store.BaseAddress] = 0x10000000
    for x in z.undefined:
        z[x] = 0xbbbbbbbb
    out = file('blah','wb')
    for x in z.segments:
        y = z.getsegment(x)
        y = z.relocatesegment(x, y)
        out.write(y)
    out.close()
if False:
    # Disabled experiments: merge several stores into one linker and check
    # cross-store symbol visibility.
    #print a
    #print c
    if True:
        z = linker.new()
        print a
        z.addstore(a)
        print b
        z.addstore(b)
        print c
        z.addstore(c)
        print d
        z.addstore(d)
    if False:
        # Expected visibility of _heapmin before/after consolidation.
        m,n='msvcr100.dll','_heapmin'
        print True,(None,n) in d.globals
        print False,(None,n) in z.globals
        print False,(m,n) in d.globals
        print True,(m,n) in z.globals
    if False:
        # Load selected DLLs from the first path that has them.
        paths = '~/../../windows/syswow64','~/python26/dlls'
        # dlls = 'ntdll.dll','kernel32.dll','python26.dll','msvcr100.dll','shell32.dll','user32.dll','gdi32.dll','pcwum.dll','advapi32.dll','shlwapi.dll','cryptsp.dll','msvcrt.dll','kernelbase.dll','shunimpl.dll','sspicli.dll'
        dlls = 'msvcr100.dll',
        for filename in dlls:
            print 'loading %s'% filename
            for p in paths:
                try:
                    z.addstore(linker.coff.executable.open('%s/%s'%(p,filename)))
                    break
                except IOError:
                    pass
                continue
            continue
        print [(m,n) for m,n in z.undefined if m is None]
    if False:
        # Load every module that still has unresolved imports.
        modules = set((m for m,n in z.undefined if m is not None))
        print [(m,n) for m,n in z.undefined if m is None]
        for filename in modules:
            if '-' in filename:
                continue
            print 'loading %s'% filename
            for p in paths:
                try:
                    z.addstore(linker.coff.executable.open('%s/%s'%(p,filename)))
                    break
                except IOError:
                    pass
                continue
            continue
    if True:
        # Stub whatever is still undefined after consolidation.
        z[store.BaseAddress] = 0x10000000
        for x in z.undefined:
            z[x] = 0xbbbbbbbb
    if True:
        print '-'*25
        out = file('blah','wb')
        for x in z.segments:
            y = z.getsegment(x)
            y = z.relocatesegment(x, y)
            out.write(y)
        out.close()
    if False:
        # Relocate only the .text segment of the raw object file.
        print '-'*25
        for x in a.externals:
            a[x] = 0xbbbbbbbb
        a[store.BaseAddress] = 0x10000000
        b = a.getsegment('.text')
        c = a.relocatesegment('.text',b)
#    import ptypes
#    print ptypes.hexdump(c, a['.text'])
|
tReaderTC
from hwtLib.structManipulators.structWriter_test import StructWriter_TC
from hwtLib.tests.constraints.xdc_clock_related_test import ConstraintsXdcClockRelatedTC
from hwtLib.tests.frameTmpl_test import FrameTmplTC
from hwtLib.tests.pyUtils.arrayQuery_test import ArrayQueryTC
from hwtLib.tests.pyUtils.fileUtils_test import FileUtilsTC
from hwtLib.tests.rdSynced_agent_test import RdSynced_agent_TC
from hwtLib.tests.repr_of_hdlObjs_test import ReprOfHdlObjsTC
from hwtLib.tests.resourceAnalyzer_test import ResourceAnalyzer_TC
from hwtLib.tests.serialization.hdlReaname_test import SerializerHdlRename_TC
from hwtLib.tests.serialization.ipCorePackager_test import IpCorePackagerTC
from hwtLib.tests.serialization.modes_test import SerializerModes_TC
from hwtLib.tests.serialization.tmpVar_test import Serializer_tmpVar_TC
from hwtLib.tests.serialization.vhdl_test import Vhdl2008Serializer_TC
from hwtLib.tests.simulator.basicRtlSimulatorVcdTmpDirs_test import BasicRtlSimulatorVcdTmpDirs_TCs
from hwtLib.tests.simulator.json_log_test import HsFifoJsonLogTC
from hwtLib.tests.simulator.utils_test import SimulatorUtilsTC
from hwtLib.tests.structIntf_operator_test import StructIntf_operatorTC
from hwtLib.tests.synthesizer.astNodeIoReplacing_test import AstNodeIoReplacingTC
from hwtLib.tests.synthesizer.interfaceLevel.interfaceSynthesizerTC import \
InterfaceSynthesizerTC
from hwtLib.tests.synthesizer.interfaceLevel.subunitsSynthesisTC import \
SubunitsSynthesisTC
from hwtLib.tests.synthesizer.rtlLevel.basic_signal_methods_test import BasicSignalMethodsTC
from hwtLib.tests.synthesizer.rtlLevel.statements_consystency_test import StatementsConsystencyTC
from hwtLib.tests.synthesizer.statementTreesInternal_test import StatementTreesInternalTC
from hwtLib.tests.synthesizer.statementTrees_test import StatementTreesTC
from hwtLib.tests.synthesizer.statements_test import StatementsTC
from hwtLib.tests.transTmpl_test import TransTmpl_TC
from hwtLib.tests.types.bitsSlicing_test import BitsSlicingTC
from hwtLib.tests.types.hstructVal_test import HStructValTC
from hwtLib.tests.types.hvalue_test import HValueTC
from hwtLib.tests.types.operators_test import OperatorTC
from hwtLib.tests.types.union_test import UnionTC
from hwtLib.tests.unionIntf_test import UnionIntfTC
from hwtLib.xilinx.ipif.axi4Lite_to_ipif_test import Axi4Lite_to_IpifTC
from hwtLib.xilinx.ipif.buff_test import IpifBuffTC
from hwtLib.xilinx.ipif.endpoint_test import IpifEndpointTC, \
IpifEndpointDenseTC, IpifEndpointDenseStartTC, IpifEndpointArray
from hwtLib.xilinx.ipif.interconnectMatrix_test import IpifInterconnectMatrixTC
from hwtLib.xilinx.locallink.axis_conv_test import AxiS_localLinkConvTC
from hwtLib.xilinx.primitive.examples.dsp48e1Add_test import Dsp48e1Add_TCs
from hwtLib.xilinx.slr_crossing_test import HsSlrCrossingTC
from hwtLib.amba.axi_comp.oooOp.reorder_buffer_test import ReorderBufferTC
# from hwt.simulator.simTestCase import SimTestCase
def testSuiteFromTCs(*tcs):
    """Load every TestCase class in *tcs* into a single TestSuite."""
    # Non-simulation cases are independent and safe to split across
    # worker processes.
    for case in tcs:
        if not issubclass(case, SimTestCase):
            case._multiprocess_can_split_ = True
    loader = TestLoader()
    loaded = [loader.loadTestsFromTestCase(case) for case in tcs]
    return TestSuite(loaded)
suite = testSuiteFromTCs(
# basic tests
FileUtilsTC,
ArrayQueryTC,
RtlLvlTC,
ReprOfHdlObjsTC,
HdlCommentsTC,
InterfaceSynthesizerTC,
SubunitsSynthesisTC,
EmptyUnitWithSpiTC,
Simple2withNonDirectIntConnectionTC,
SimpleWithNonDirectIntConncetionTC,
SimpleSubunit3TC,
UnitToUnitConnectionTC,
OperatorTC,
StructIntf_operatorTC,
CastTc,
BitsSlicingTC,
HStructValTC,
ParametrizationTC,
BasicSignalMethodsTC,
StatementsConsystencyTC,
HValueTC,
StatementTreesInternalTC,
StatementTreesTC,
StatementsTC,
AstNodeIoReplacingTC,
ErrorsTC,
StaticForLoopCntrlTC,
SimpleUnitWithParamTC,
SimpleSubunit2TC,
HierarchySerializationTC,
ListOfInterfacesSample0TC,
ListOfInterfacesSample1TC,
ListOfInterfacesSample2TC,
ListOfInterfacesSample3TC,
ListOfInterfacesSample4TC,
PrivateSignalsOfStructTypeTC,
FrameTmplTC,
Showcase0TC,
SimulatorUtilsTC,
HsFifoJsonLogTC,
RdSynced_agent_TC,
Segment7TC,
SerializerModes_TC,
Serializer_tmpVar_TC,
SerializerHdlRename_TC,
VhdlVectorAutoCastExampleTC,
TransTmpl_TC,
UnionTC,
UnionIntfTC,
ResourceAnalyzer_TC,
CombLoopAnalysisTC,
Vhdl2008Serializer_TC,
CodeBlokStmTC,
IfStmTC,
SwitchStmTC,
SimpleRomTC,
SimpleSyncRomTC,
RomResourcesTC,
DRegTC,
DoubleRRegTC,
DReg_asyncRstTC,
RegSerializationTC,
CntrTC,
CntrResourceAnalysisTC,
ConstConditionTC,
TemplateConfigured_TC,
FrameAlignmentUtilsTC,
FrameJoinUtilsTC,
HwExceptionCatch_TC,
PseudoLru_TC,
# tests of simple units
TimerTC,
ConcatTC,
VldMaskConflictsResolvingTC,
ConstDriverTC,
WidthCastingExampleTC,
SimpleTC,
SimpleSubunitTC,
RamTC,
RamXorSingleClockTC,
*RamTransactionalTCs,
BramWireTC,
LutRamTC,
FsmSerializationTC,
FsmExampleTC,
Ha | drcodedFsmExampleTC,
OneHotToBinTC,
BinToBcdTC,
BcdToBinTC,
AxiS_strFormat_TC,
BinToOneHotTC,
GrayCntrTC,
TwoCntrsTC,
SelfRefCntrTC,
CountLeadingTC,
MultiplierBoothTC,
IndexingTC,
CdcTC,
RamResourcesTC,
SimpleAsyncRamTC,
SimpleSyncRamTC,
SimpleUnitAxiStream_TC,
FifoWriterAgentTC,
FifoReaderAgentTC,
FifoTC,
FifoAsyncTC,
FifoArrayTC,
HsJoinPrioritizedTC,
HsJoinPrioritize | d_randomized_TC,
HsJoinFair_2inputs_TC,
HsJoinFair_3inputs_TC,
HandshakedCdc_slow_to_fast_TC,
HandshakedCdc_fast_to_slow_TC,
*HandshakedToAxiStreamTCs,
*RamAsHs_TCs,
LfsrTC,
BitonicSorterTC,
InterfaceWithArrayTypesTC,
FlipRegTC,
FlipCntrTC,
FlipRamTC,
HsSplitCopyTC,
HsSplitCopy_randomized_TC,
HsFifoTC,
HsFifoAsyncTC,
*HandshakedRegTCs,
HsResizerTC,
HsBuilderSplit_TC,
CamTC,
UartTxTC,
UartRxBasicTC,
UartRxTC,
UartTxRxTC,
SpiMasterTC,
I2CMasterBitCntrlTC,
*EthernetMac_rx_TCs,
*EthernetMac_tx_TCs,
MdioMasterTC,
Hd44780Driver8bTC,
CrcUtilsTC,
CrcCombTC,
CrcTC,
UsbAgentTC,
*UlpiAgent_TCs,
*UtmiAgentTCs,
Utmi_to_UlpiTC,
Usb2SieDeviceRxTC,
Usb2SieDeviceTxTC,
Usb2CdcVcpTC,
*UsbipTCs,
BusEndpointTC,
*BramPortEndpointTCs,
# avalon tests
AvalonMmAgentTC,
*AvalonMmEndpointTCs,
AvalonMmBram_TC,
*AxiToAvalonMm_TCs,
AvalonStAgentTC,
AvalonMmBuff_TC,
# axi tests
SimpleAxiRegsTC,
AxiTC,
*AxiLiteEndpointTCs,
*AxiLiteEndpointArrTCs,
AxiLiteEndpoint_struct_TC,
AxiLiteEndpoint_arrayStruct_TC,
AxiLiteEndpoint_fromInterfaceTC,
AxiLiteEndpoint_fromInterface_arr_TC,
AxiLite_to_Axi_TC,
Axi_to_AxiLite_TC,
AxiRegTC,
AxiTesterTC,
*AxiStaticRemapTCs,
AxiResizeTC,
AxisFrameGenTC,
*AddrDataHs_to_Axi_TCs,
Axi4BRam_TC,
*Axi_rDatapump_alignedTCs,
*Axi_rDatapump_unalignedTCs,
*Axi_wDatapumpTCs,
AxiSlaveTimeoutTC,
AxiSStoredBurstTC,
AxiS_en_TC,
AxiS_fifoMeasuringTC,
AxiSFifoDropTC,
*AxiS_resizer_TCs,
AxiS_frameDeparser_TC,
AxiS_localLinkConvTC,
AxiS_footerSplitTC,
AxiS_frameParserTC,
*AxiS_FrameJoin_TCs,
HandshakedBuilderSimpleTC,
*EthAddrUpdaterTCs,
RStrictOrderInterconnectTC,
WStrictOrderInterconnectTC,
WStrictOrderInterconnect2TC,
WStrictOrderInterconnectComplexTC,
*AxiInterconnectMatrixAddrCrossbar_TCs,
*AxiInterconnectMatrixCrossbar_TCs,
*AxiInterconnectMatrixR_TCs,
*AxiInterconnectMatrixW_TCs,
*AxiWriteAggregator_TCs,
*AxiReadAggregator_TCs,
*AxiStoreQueueWritePropagating_TCs,
*AxiCacheWriteAllocWawOnlyWritePropagatingTCs,
Axi_ag_TC,
Axi4_streamToMemTC,
ArrayItemGetterTC,
ArrayItemGetter2in1WordTC,
ArrayBuff_wr |
# ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
# <pep8 compliant>
import bpy
from rna_prop_ui import PropertyPanel
class DataButtonsPanel():
    """Mixin shared by the metaball panels: places them in the Properties
    editor's "data" tab and hides them when no metaball is available."""
    bl_space_type = 'PROPERTIES'
    bl_region_type = 'WINDOW'
    bl_context = "data"

    @classmethod
    def poll(cls, context):
        # Shown only while a metaball datablock is in context.
        mball = context.meta_ball
        return mball
class DATA_PT_context_metaball(DataButtonsPanel, bpy.types.Panel):
    # Header-less panel that only shows the datablock selector.
    bl_label = ""
    bl_options = {'HIDE_HEADER'}
    def draw(self, context):
        layout = self.layout
        ob = context.object
        mball = context.meta_ball
        space = context.space_data
        # With an active object, edit its data link; otherwise the editor is
        # pinned directly to a metaball datablock.
        if ob:
            layout.template_ID(ob, "data", unlink="None")
        elif mball:
            layout.template_ID(space, "pin_id", unlink="None")
class DATA_PT_metaball(DataButtonsPanel, bpy.types.Panel):
    # Main metaball settings panel.
    bl_label = "Metaball"
    def draw(self, context):
        layout = self.layout
        mball = context.meta_ball
        # Two columns: resolution on the left, general settings on the right.
        split = layout.split()
        col = split.column()
        col.label(text="Resolution:")
        sub = col.column(align=True)
        sub.prop(mball, "resolution", text="View")
        sub.prop(mball, "render_resolution", text="Render")
        col = split.column()
        col.label(text="Settings:")
        col.prop(mball, "threshold", text="Threshold")
        # Update method gets a full-width expanded row below the columns.
        layout.label(text="Update:")
        layout.prop(mball, "update_method", expand=True)
class DATA_PT_metaball_element(DataButtonsPanel, bpy.types.Panel):
    # Settings for the currently selected metaball element.
    bl_label = "Active Element"
    @classmethod
    def poll(cls, context):
        # Requires both a metaball datablock and an active element on it.
        return (context.meta_ball and context.meta_ball.elements.active)
    def draw(self, context):
        layout = self.layout
        metaelem = context.meta_ball.elements.active
        layout.prop(metaelem, "type")
        split = layout.split()
        col = split.column(align=True)
        col.label(text="Settings:")
        col.prop(metaelem, "stiffness", text="Stiffness")
        col.prop(metaelem, "use_negative", text="Negative")
        col.prop(metaelem, "hide", text="Hide")
        col = split.column(align=True)
        # Which size axes are shown depends on the element type's geometry.
        if metaelem.type in ('CUBE', 'ELLIPSOID'):
            col.label(text="Size:")
            col.prop(metaelem, "size_x", text="X")
            col.prop(metaelem, "size_y", text="Y")
            col.prop(metaelem, "size_z", text="Z")
        elif metaelem.type == 'TUBE':
            col.label(text="Size:")
            col.prop(metaelem, "size_x", text="X")
        elif metaelem.type == 'PLANE':
            col.label(text="Size:")
            col.prop(metaelem, "size_x", text="X")
            col.prop(metaelem, "size_y", text="Y")
class DATA_PT_custom_props_metaball(DataButtonsPanel, PropertyPanel, bpy.types.Panel):
    # PropertyPanel supplies the custom-properties UI; these attributes tell
    # it which datablock to edit and which engines show the panel.
    COMPAT_ENGINES = {'BLENDER_RENDER', 'BLENDER_GAME'}
    _context_path = "object.data"
    _property_type = bpy.types.MetaBall
def register():
    """Module registration hook; currently a no-op."""
    pass


def unregister():
    """Module unregistration hook; currently a no-op."""
    pass


if __name__ == "__main__":
    register()
|
import os
def create_fds():
    """Open an anonymous pipe and return it as (write_file, read_file)."""
    read_fd, write_fd = os.pipe()
    writer = os.fdopen(write_fd, "w")
    reader = os.fdopen(read_fd, "r")
    return (writer, reader)
def filename(pipef):
    """Return the /proc-style name of the pipe, e.g. 'pipe:[1234]'."""
    inode = os.fstat(pipef.fileno()).st_ino
    return 'pipe:[%d]' % inode
def dump_opts(sockf):
    """Socket options to record for *sockf*; none are tracked yet."""
    return []
|
"""MiniAEFrame - A minimal AppleEvent Application framework.
There are two classes:
AEServer -- a mixin class offering nice AE handling.
MiniApplication -- a very minimal alternative to FrameWork.py,
only suitable for the simplest of AppleEvent servers.
"""
from warnings import warnpy3k
warnpy3k("In 3.x, the MiniAEFrame module is removed.", stacklevel=2)
import traceback
import MacOS
from Carbon import AE
from Carbon.AppleEvents import *
from Carbon import Evt
from Carbon.Events import *
from Carbon import Menu
from Carbon import Win
from Carbon.Windows import *
from Carbon import Qd
import aetools
import EasyDialogs
kHighLevelEvent = 23 # Not defined anywhere for Python yet?
class MiniApplication:
    """A minimal FrameWork.Application-like class"""
    def __init__(self):
        # Set to 1 by _quit() or the quit menu/key; mainloop() exits then.
        self.quitting = 0
        # Initialize menu
        self.appleid = 1
        self.quitid = 2
        Menu.ClearMenuBar()
        # "\024" is the Apple-menu title character.
        self.applemenu = applemenu = Menu.NewMenu(self.appleid, "\024")
        applemenu.AppendMenu("%s;(-" % self.getaboutmenutext())
        if MacOS.runtimemodel == 'ppc':
            applemenu.AppendResMenu('DRVR')
        applemenu.InsertMenu(0)
        self.quitmenu = Menu.NewMenu(self.quitid, "File")
        self.quitmenu.AppendMenu("Quit")
        self.quitmenu.SetItemCmd(1, ord("Q"))
        self.quitmenu.InsertMenu(0)
        Menu.DrawMenuBar()
    def __del__(self):
        self.close()
    def close(self):
        # Subclasses (e.g. AEServer) override this to release resources.
        pass
    def mainloop(self, mask = everyEvent, timeout = 60*60):
        """Pump events until _quit() is called."""
        while not self.quitting:
            self.dooneevent(mask, timeout)
    def _quit(self):
        self.quitting = 1
    def dooneevent(self, mask = everyEvent, timeout = 60*60):
        """Wait up to *timeout* ticks for one event and dispatch it."""
        got, event = Evt.WaitNextEvent(mask, timeout)
        if got:
            self.lowlevelhandler(event)
    def lowlevelhandler(self, event):
        """Dispatch one raw Mac event: AppleEvents, command keys and menu
        clicks are handled here; anything else goes to Python/SIOUX."""
        what, message, when, where, modifiers = event
        h, v = where
        if what == kHighLevelEvent:
            msg = "High Level Event: %r %r" % (code(message), code(h | (v<<16)))
            try:
                AE.AEProcessAppleEvent(event)
            except AE.Error, err:
                print 'AE error: ', err
                print 'in', msg
                traceback.print_exc()
            return
        elif what == keyDown:
            c = chr(message & charCodeMask)
            if modifiers & cmdKey:
                # Cmd-. interrupts, Cmd-Q quits.
                if c == '.':
                    raise KeyboardInterrupt, "Command-period"
                if c == 'q':
                    if hasattr(MacOS, 'OutputSeen'):
                        MacOS.OutputSeen()
                    self.quitting = 1
                    return
        elif what == mouseDown:
            partcode, window = Win.FindWindow(where)
            if partcode == inMenuBar:
                result = Menu.MenuSelect(where)
                id = (result>>16) & 0xffff      # Hi word
                item = result & 0xffff      # Lo word
                if id == self.appleid:
                    if item == 1:
                        EasyDialogs.Message(self.getabouttext())
                    elif item > 1 and hasattr(Menu, 'OpenDeskAcc'):
                        name = self.applemenu.GetMenuItemText(item)
                        Menu.OpenDeskAcc(name)
                elif id == self.quitid and item == 1:
                    if hasattr(MacOS, 'OutputSeen'):
                        MacOS.OutputSeen()
                    self.quitting = 1
                Menu.HiliteMenu(0)
                return
        # Anything not handled is passed to Python/SIOUX
        if hasattr(MacOS, 'HandleEvent'):
            MacOS.HandleEvent(event)
        else:
            print "Unhandled event:", event
    def getabouttext(self):
        return self.__class__.__name__
    def getaboutmenutext(self):
        # NOTE(review): \311 presumably renders as an ellipsis in the Mac
        # character set ("About X...") -- confirm.
        return "About %s\311" % self.__class__.__name__
class AEServer:
    """Mixin that dispatches incoming AppleEvents to registered callbacks."""
    def __init__(self):
        # (class, type) -> callback mapping.
        self.ae_handlers = {}
    def installaehandler(self, classe, type, callback):
        """Register *callback* for events of (classe, type); '****' acts as
        a wildcard at dispatch time."""
        AE.AEInstallEventHandler(classe, type, self.callback_wrapper)
        self.ae_handlers[(classe, type)] = callback
    def close(self):
        for classe, type in self.ae_handlers.keys():
            AE.AERemoveEventHandler(classe, type)
    def callback_wrapper(self, _request, _reply):
        """Unpack the event, find the most specific handler (exact match,
        then class wildcard, then full wildcard), call it, and pack the
        return value into the reply."""
        _parameters, _attributes = aetools.unpackevent(_request)
        _class = _attributes['evcl'].type
        _type = _attributes['evid'].type
        if (_class, _type) in self.ae_handlers:
            _function = self.ae_handlers[(_class, _type)]
        elif (_class, '****') in self.ae_handlers:
            _function = self.ae_handlers[(_class, '****')]
        elif ('****', '****') in self.ae_handlers:
            _function = self.ae_handlers[('****', '****')]
        else:
            # NOTE(review): Python 2 string exception, kept as-is.
            raise 'Cannot happen: AE callback without handler', (_class, _type)
        # XXXX Do key-to-name mapping here
        _parameters['_attributes'] = _attributes
        _parameters['_class'] = _class
        _parameters['_type'] = _type
        if '----' in _parameters:
            # The direct object ('----') is passed positionally.
            _object = _parameters['----']
            del _parameters['----']
            # The try/except that used to be here can mask programmer errors.
            # Let the program crash, the programmer can always add a **args
            # to the formal parameter list.
            rv = _function(_object, **_parameters)
        else:
            #Same try/except comment as above
            rv = _function(**_parameters)
        if rv is None:
            aetools.packevent(_reply, {})
        else:
            aetools.packevent(_reply, {'----':rv})
def code(x):
    """Convert a long int to the 4-character code it really is."""
    # Peel off the four bytes, least-significant first, then reverse so the
    # most-significant byte becomes the first character.
    chars = []
    for _ in range(4):
        x, byte = divmod(x, 256)
        chars.append(chr(byte))
    return ''.join(reversed(chars))
class _Test(AEServer, MiniApplication):
    """Mini test application, handles required events"""
    def __init__(self):
        MiniApplication.__init__(self)
        AEServer.__init__(self)
        # Handle the required open/quit events plus a catch-all, then run.
        self.installaehandler('aevt', 'oapp', self.open_app)
        self.installaehandler('aevt', 'quit', self.quit)
        self.installaehandler('****', '****', self.other)
        self.mainloop()
    def quit(self, **args):
        self._quit()
    def open_app(self, **args):
        pass
    def other(self, _object=None, _class=None, _type=None, **args):
        # Catch-all: just log whatever event arrived.
        print 'AppleEvent', (_class, _type), 'for', _object, 'Other args:', args

if __name__ == '__main__':
    _Test()
|
"""
Test basic std::weak_ptr functionality.
"""
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestSharedPtr(TestBase):
    # Standard lldb test-suite directory boilerplate.
    mydir = TestBase.compute_mydir(__file__)
    @add_test_categories(["libc++"])
    @skipIf(compiler=no_match("clang"))
    def test(self):
        """Exercise std::weak_ptr lock/assign/use_count/reset through the
        expression evaluator with the C++ std module imported."""
        self.build()
        lldbutil.run_to_source_breakpoint(self,
            "// Set break point at this line.", lldb.SBFileSpec("main.cpp"))
        self.runCmd("settings set target.import-std-module true")
        # NOTE: the $N result counters are sequential, so the order of the
        # expect() calls below must not change.
        self.expect("expr (int)*w.lock()", substrs=['(int) $0 = 3'])
        self.expect("expr (int)(*w.lock() = 5)", substrs=['(int) $1 = 5'])
        self.expect("expr (int)*w.lock()", substrs=['(int) $2 = 5'])
        self.expect("expr w.use_count()", substrs=['(long) $3 = 1'])
        self.expect("expr w.reset()")
        self.expect("expr w.use_count()", substrs=['(long) $4 = 0'])
|
"""Support for Alexa skill auth."""
import asyncio
from datetime import timedelta
import json
import logging
import aiohttp
import async_timeout
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from homeassistant.util import dt
_LOGGER = logging.getLogger(__name__)
LWA_TOKEN_URI = "https://api.amazon.com/auth/o2/token"
LWA_HEADERS = {"Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"}
PREEMPTIVE_REFRESH_TTL_IN_SECONDS = 300
STORAGE_KEY = "alexa_auth"
STORAGE_VERSION = 1
STORAGE_EXPIRE_TIME = "expire_time"
STORAGE_ACCESS_TOKEN = "access_token"
STORAGE_REFRESH_TOKEN = "refresh_token"
class Auth:
    """Handle authentication to send events to Alexa."""

    def __init__(self, hass, client_id, client_secret):
        """Initialize the Auth class."""
        self.hass = hass
        self.client_id = client_id
        self.client_secret = client_secret
        # Cached token preferences; lazily loaded from storage on first use.
        self._prefs = None
        self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
        # Serializes token refreshes so concurrent callers don't race LWA.
        self._get_token_lock = asyncio.Lock()

    async def async_do_auth(self, accept_grant_code):
        """Do authentication with an AcceptGrant code."""
        # access token not retrieved yet for the first time, so this should
        # be an access token request
        lwa_params = {
            "grant_type": "authorization_code",
            "code": accept_grant_code,
            "client_id": self.client_id,
            "client_secret": self.client_secret,
        }
        _LOGGER.debug(
            "Calling LWA to get the access token (first time), with: %s",
            json.dumps(lwa_params),
        )
        return await self._async_request_new_token(lwa_params)

    @callback
    def async_invalidate_access_token(self):
        """Invalidate access token."""
        self._prefs[STORAGE_ACCESS_TOKEN] = None

    async def async_get_access_token(self):
        """Perform access token or token refresh request.

        Returns the valid access token, or None when no refresh token is
        stored or the refresh request fails.
        """
        async with self._get_token_lock:
            if self._prefs is None:
                await self.async_load_preferences()
            if self.is_token_valid():
                _LOGGER.debug("Token still valid, using it.")
                return self._prefs[STORAGE_ACCESS_TOKEN]
            if self._prefs[STORAGE_REFRESH_TOKEN] is None:
                _LOGGER.debug("Token invalid and no refresh token available.")
                return None
            lwa_params = {
                "grant_type": "refresh_token",
                "refresh_token": self._prefs[STORAGE_REFRESH_TOKEN],
                "client_id": self.client_id,
                "client_secret": self.client_secret,
            }
            _LOGGER.debug("Calling LWA to refresh the access token.")
            return await self._async_request_new_token(lwa_params)

    @callback
    def is_token_valid(self):
        """Check if a token is already loaded and if it is still valid."""
        if not self._prefs[STORAGE_ACCESS_TOKEN]:
            return False
        expire_time = dt.parse_datetime(self._prefs[STORAGE_EXPIRE_TIME])
        # Treat the token as expired slightly early so an in-flight request
        # isn't handed a token that lapses before Alexa processes it.
        preemptive_expire_time = expire_time - timedelta(
            seconds=PREEMPTIVE_REFRESH_TTL_IN_SECONDS
        )
        return dt.utcnow() < preemptive_expire_time

    async def _async_request_new_token(self, lwa_params):
        """POST *lwa_params* to LWA, persist and return the new access token.

        Returns None on timeout, connection error, or a non-200 response.
        """
        try:
            session = aiohttp_client.async_get_clientsession(self.hass)
            # FIX: async_timeout.timeout must be entered with `async with`
            # inside a coroutine; the synchronous `with` form is deprecated
            # and removed in newer async_timeout releases.
            async with async_timeout.timeout(10):
                response = await session.post(
                    LWA_TOKEN_URI,
                    headers=LWA_HEADERS,
                    data=lwa_params,
                    allow_redirects=True,
                )
        except (asyncio.TimeoutError, aiohttp.ClientError):
            _LOGGER.error("Timeout calling LWA to get auth token.")
            return None
        _LOGGER.debug("LWA response header: %s", response.headers)
        _LOGGER.debug("LWA response status: %s", response.status)
        if response.status != 200:
            _LOGGER.error("Error calling LWA to get auth token.")
            return None
        response_json = await response.json()
        _LOGGER.debug("LWA response body  : %s", response_json)
        access_token = response_json["access_token"]
        refresh_token = response_json["refresh_token"]
        expires_in = response_json["expires_in"]
        expire_time = dt.utcnow() + timedelta(seconds=expires_in)
        await self._async_update_preferences(
            access_token, refresh_token, expire_time.isoformat()
        )
        return access_token

    async def async_load_preferences(self):
        """Load preferences with stored tokens."""
        self._prefs = await self._store.async_load()
        if self._prefs is None:
            # First run: start with an empty token record.
            self._prefs = {
                STORAGE_ACCESS_TOKEN: None,
                STORAGE_REFRESH_TOKEN: None,
                STORAGE_EXPIRE_TIME: None,
            }

    async def _async_update_preferences(self, access_token, refresh_token, expire_time):
        """Update user preferences."""
        if self._prefs is None:
            await self.async_load_preferences()
        # Only overwrite fields that were actually provided.
        if access_token is not None:
            self._prefs[STORAGE_ACCESS_TOKEN] = access_token
        if refresh_token is not None:
            self._prefs[STORAGE_REFRESH_TOKEN] = refresh_token
        if expire_time is not None:
            self._prefs[STORAGE_EXPIRE_TIME] = expire_time
        await self._store.async_save(self._prefs)
|
import requests
import time
import json
# ------------ Do data need to roll
def roll_data_if_needed(secondsAllowed):
    """Roll the temperature data when more than *secondsAllowed* seconds
    have elapsed since the last roll recorded in Firebase."""
    # How long since the last recorded roll?
    last_roll_time = get_status()
    now = int(time.time())
    time_since_roll = now - last_roll_time
    # do I need to roll?
    if time_since_roll > secondsAllowed:
        roll_data()
    else:
        print 'do not roll'
# ------------ Get current status
def get_status():
    """Return the epoch time (seconds) of the last data roll, as stored in
    the Firebase status document."""
    response = firebaseGet('status.json')
    # FIX: the decoded body was previously bound to the name `json`,
    # shadowing the imported json module inside this function.
    status = response.json()
    return status.get('lastRollTime')
# ------------ Roll the data
def roll_data():
    """Move the current temperature readings into the backup document,
    clear the live document, and record the roll time in the status doc."""
    # delete the previous backup
    firebaseDelete('temperatures-backup.json')
    # update status time
    data = '{{"lastRollTime": {:d}}}'.format(int(time.time()))
    firebasePut('status.json', data)
    # get current values
    response = firebaseGet('temperatures.json')
    current_values = response.text
    # add to backup
    firebasePut('temperatures-backup.json', current_values)
    # delete current values
    # NOTE(review): the steps above are not atomic -- a failure mid-way can
    # lose or duplicate data; acceptable for this periodic housekeeping.
    firebaseDelete('temperatures.json')
# ------------ Firebase calls
def firebaseGet(path):
    """GET *path* from this Pi's Firebase tree and return the response."""
    url = getFirebaseUrl(path)
    return requests.get(url, params=getFirebaseQueryParams())
def firebasePut(path, data):
    """PUT *data* to *path* in this Pi's Firebase tree.

    Returns the response for parity with firebaseGet/firebaseDelete so
    callers can inspect the status (it was previously discarded; existing
    callers that ignore the return value are unaffected).
    """
    return requests.put(getFirebaseUrl(path), params=getFirebaseQueryParams(), data=data)
def firebaseDelete(path):
    """DELETE *path* from this Pi's Firebase tree and return the response."""
    url = getFirebaseUrl(path)
    return requests.delete(url, params=getFirebaseQueryParams())
def getFirebaseQueryParams():
    """Query params shared by every Firebase call: the auth token."""
    token = config.get('auth')
    return {'auth': token}
def getFirebaseUrl(path):
    """Absolute Firebase URL: <base_url>/<pi_name>/<path>."""
    parts = (config.get('base_url'), config.get('pi_name'), path)
    return '{}/{}/{}'.format(*parts)
# ------------ Data setup
# NOTE(review): the config file handle is never closed explicitly; it is
# left to the garbage collector, which is fine for a short-lived script.
config = json.load(open("/home/pi/config.json"))
# ------------ Let's do this
# Roll at most once per 24 hours.
roll_data_if_needed(60*60*24)
|
"""
Tests for the plugin API
"""
from __future__ import ab | solute_import
from django.test import TestCase
from openedx.core.lib.plugins import PluginError
from openedx.core.lib.course_tabs import CourseTabPluginManager
class TestCourseTabApi(TestCase):
    """
    Unit tests for the course tab plugin API
    """
    def test_get_plugin(self):
        """
        Verify that get_plugin works as expected.
        """
        # A known built-in tab type resolves to its plugin class...
        tab_type = CourseTabPluginManager.get_plugin("instructor")
        self.assertEqual(tab_type.title, "Instructor")
        # ...and an unknown type raises PluginError.
        with self.assertRaises(PluginError):
            CourseTabPluginManager.get_plugin("no_such_type")
|
nk_stat * scalar are TRUE, i.e. non-contam
sequences. To return contaminants (sequences that FAIL the inequality),
set negate to True."""
i_s = stats_header.index(sample_stat)
i_b = stats_header.index(blank_stat)
passed_otus = set()
for otu in stats_dict:
if((float(stats_dict[otu][i_s]) > (float(scalar) * float(stats_dict[otu][i_b]))) != negate):
passed_otus.add(otu)
# print passed_otus
return(passed_otus)
def calc_per_category_decontam_stats(biom_obj, filter_otus):
    """Per-sample totals restricted to the OTUs in *filter_otus*.

    Returns (reads, otus): for each sample, the summed counts of the kept
    observations, and the number of distinct kept observations (counted via
    the presence/absence transform). Both are plain lists ordered like the
    table's samples.
    """
    reads = biom_obj.filter(lambda val, id_, metadata: id_ in filter_otus,
                            axis='observation', invert=False, inplace=False).sum(axis = 'sample')
    # pa() converts counts to presence/absence, so summing counts OTUs.
    otus = biom_obj.pa(inplace = False).filter(lambda val, id_, metadata: id_ in filter_otus,
                            axis='observation', invert=False, inplace=False).sum(axis = 'sample')
    return(reads.tolist(),otus.tolist())
def calc_per_library_decontam_stats(start_biom, output_dict):
    """Per-library read/OTU counts for the starting table and for each
    decontamination step present in *output_dict*.

    Returns (results_dict, results_header) where the header lists the
    categories in pipeline order, starting with 'starting'.
    """
    pipeline_steps = ('below_relabund_threshold', 'putative_contaminants',
                      'ever_good_seqs', 'reinstated_seqs', 'all_good_seqs')
    # Baseline: stats over every observation in the starting table.
    all_otus = start_biom.ids(axis='observation')
    results = {'starting': calc_per_category_decontam_stats(start_biom, all_otus)}
    header = ['starting']
    # Only steps the pipeline actually produced are reported.
    for step in pipeline_steps:
        if step not in output_dict:
            continue
        results[step] = calc_per_category_decontam_stats(start_biom, output_dict[step])
        header.append(step)
    return (results, header)
def filter_contaminated_libraries(unique_seq_biom, contaminant_otus, contam_threshold):
    """Remove contaminant OTUs, then drop libraries whose remaining
    relative abundance does not exceed *contam_threshold*.

    Returns the filtered biom table.
    """
    # make relabund table
    norm_biom = unique_seq_biom.norm(inplace = False)
    # filter out sequences marked as contaminants
    norm_biom.filter(lambda val, id_, metadata: id_ in contaminant_otus,
                     axis='observation', invert=True, inplace=True)
    # keep only samples whose remaining relative abundance is above threshold
    norm_biom.filter(lambda val, id_, metadata: sum(val) > contam_threshold,
                     axis='sample', invert=False, inplace=True)
    # filter contam sequences from original biom
    filtered_biom = unique_seq_biom.filter(lambda val, id_, metadata: id_ in contaminant_otus,
                     axis='observation', invert=True, inplace=False)
    # filter samples that lost too much relative to starting from original biom
    # NOTE(review): this relies on the in-place filter returning the table
    # itself -- confirm against the biom-format API in use.
    filtered_biom = filtered_biom.filter(lambda val, id_, metadata: id_ in norm_biom.ids(axis='sample'),
                     axis='sample', invert=False, inplace=True)
    return(filtered_biom)
def print_filtered_otu_map(input_otu_map_fp, output_otu_map_fp, filter_set):
    """Copy OTU-map lines whose OTU ID is in filter_set.

    Args:
        input_otu_map_fp: path to the input OTU map (tab-delimited; OTU ID
            in the first field of each line).
        output_otu_map_fp: path of the filtered OTU map to write.
        filter_set: collection of OTU IDs to keep (a set for O(1) lookup).
    """
    # 'with' replaces the manual open/close, and plain 'r' replaces the
    # 'U' mode, which was deprecated and removed in Python 3.11 (universal
    # newlines are the default for text-mode files).
    with open(input_otu_map_fp, 'r') as input_otu_map_f, \
            open(output_otu_map_fp, 'w') as output_otu_map_f:
        for line in input_otu_map_f:
            seq_identifier = line.strip().split('\t')[0]
            # write OTU line if present in the filter set
            if seq_identifier in filter_set:
                output_otu_map_f.write(line)
def print_filtered_mothur_counts(mothur_counts_fp, output_counts_fp, filter_set):
    """Copy mothur count-table lines whose sequence ID is in filter_set.

    The first line of a mothur counts file is a header and is always kept.

    Args:
        mothur_counts_fp: path to the input mothur counts table.
        output_counts_fp: path of the filtered counts table to write.
        filter_set: collection of sequence IDs to keep.
    """
    # 'with' manages both handles; 'r' replaces the removed 'U' open mode.
    with open(mothur_counts_fp, 'r') as counts_f, \
            open(output_counts_fp, 'w') as output_counts_f:
        for line_num, line in enumerate(counts_f):
            seq_identifier = line.strip().split('\t')[0]
            # Keep the header (first) line plus any sequence in filter_set.
            # (The original comment claimed a sequence-count threshold was
            # applied here; only set membership is actually tested.)
            if line_num == 0 or seq_identifier in filter_set:
                output_counts_f.write(line)
def print_per_library_stats(per_library_stats, per_library_stats_header, lib_ids, dropped_libs=None):
    """Format per-library decontamination stats as a tab-delimited table.

    Args:
        per_library_stats: dict mapping category name -> (reads, otus), where
            reads and otus are per-library numeric lists aligned with lib_ids.
        per_library_stats_header: ordered list of category names to output.
        lib_ids: ordered list of library (sample) IDs.
        dropped_libs: optional collection of discarded library IDs; when
            non-empty a 'library_discarded' column is appended.

    Returns:
        The formatted table as a single string, one '\\n'-terminated row per
        library plus a header row.
    """
    # None default replaces the original mutable-default ([]) argument.
    if dropped_libs is None:
        dropped_libs = []
    discard = len(dropped_libs) > 0
    # Header: one <cat>_reads column per category, then one <cat>_otus.
    header_cols = ['Library']
    header_cols.extend('{0}_reads'.format(c) for c in per_library_stats_header)
    header_cols.extend('{0}_otus'.format(c) for c in per_library_stats_header)
    if discard:
        header_cols.append('library_discarded')
    lines = ['\t'.join(header_cols)]
    for idx, lib in enumerate(lib_ids):
        row = [lib]
        # Index 0 of each category holds read counts, index 1 OTU counts.
        row.extend(str(int(per_library_stats[c][0][idx]))
                   for c in per_library_stats_header)
        row.extend(str(int(per_library_stats[c][1][idx]))
                   for c in per_library_stats_header)
        if discard:
            row.append('True' if lib in dropped_libs else 'False')
        lines.append('\t'.join(row))
    # join + trailing newline reproduces the original += loop's output
    # without quadratic string concatenation.
    return '\n'.join(lines) + '\n'
def print_otu_disposition(input_seqs, output_dict, hierarchy=None):
    """Assign each sequence to the first matching category of a hierarchy.

    Args:
        input_seqs: iterable of sequence IDs, in output order.
        output_dict: dict mapping category name -> set of sequence IDs.
        hierarchy: ordered list of category names, highest priority first.
            Defaults to the standard decontamination hierarchy.

    Returns:
        A string of 'seq<TAB>category' lines; sequences matching no
        category are omitted.
    """
    # None default replaces the original mutable-default ([]) argument;
    # an explicitly passed empty list still selects the default hierarchy.
    if not hierarchy:
        hierarchy = ['below_relabund_threshold', 'putative_contaminants',
                     'reinstated_seqs', 'ever_good_seqs']
    # Subset hierarchy to levels also in output dict:
    hierarchy = [level for level in hierarchy if level in output_dict]
    # Check that the levels of the hierarchy are non-overlapping:
    for i in range(len(hierarchy) - 1):
        for j in range(i + 1, len(hierarchy)):
            if not output_dict[hierarchy[i]].isdisjoint(output_dict[hierarchy[j]]):
                print('warning: non-disjoint sets in the disposition hierarchy')
    out_lines = []
    for seq in input_seqs:
        for level in hierarchy:
            if seq in output_dict[level]:
                out_lines.append('{0}\t{1}\n'.format(seq, level))
                break
    return ''.join(out_lines)
def print_filtered_seq_headers(seq_headers, output_headers_fp, filter_set):
    """Write the headers from seq_headers that appear in filter_set.

    Args:
        seq_headers: iterable of sequence header strings.
        output_headers_fp: path of the output file (one header per line).
        filter_set: collection of headers to keep (a set for O(1) lookup).
    """
    # 'with' guarantees the handle is closed even if a write fails.
    with open(output_headers_fp, 'w') as output_headers_f:
        for header in seq_headers:
            if header in filter_set:
                output_headers_f.write('{0}\n'.format(header))
def print_filtered_output(output_method, unfiltered_input, output_dir, output_dict, output_categories=None):
    """Write one filtered output file per category using the chosen writer.

    Args:
        output_method: one of 'seq_headers', 'mothur_counts' or 'otu_map';
            selects the print_filtered_* writer to dispatch to.
        unfiltered_input: input passed through to the writer.
        output_dir: directory receiving '<category>_<output_method>.txt'.
        output_dict: dict mapping category name -> filter set.
        output_categories: optional subset of categories to write; defaults
            to all keys of output_dict.

    Raises:
        ValueError: if output_method is not a known writer name.
    """
    # Resolve the writer first so an unknown method fails fast with a clear
    # error (the original initialized output_fn to a string and would try to
    # call it, raising a confusing TypeError).
    if output_method == 'seq_headers':
        output_fn = print_filtered_seq_headers
    elif output_method == 'mothur_counts':
        output_fn = print_filtered_mothur_counts
    elif output_method == 'otu_map':
        output_fn = print_filtered_otu_map
    else:
        raise ValueError('Unknown output method: {0}'.format(output_method))
    if not output_categories:
        output_categories = output_dict.keys()
    for category in output_categories:
        output_fn(unfiltered_input,
                  os.path.join(output_dir,
                               '{0}_{1}.txt'.format(category, output_method)),
                  output_dict[category])
def print_results_file(seq_ids,
                       output_dict,
                       output_fp,
                       stats_header=None,
                       stats_dict=None,
                       corr_data_dict=None):
    """Write the per-sequence decontamination results table.

    Columns: SeqID, one 0/1 membership column per (sorted) category of
    output_dict, then optional per-sequence stats columns and optional
    Spearman correlation columns.

    Args:
        seq_ids: iterable of sequence IDs (row order).
        output_dict: dict mapping category name -> set of member seq IDs.
        output_fp: output file path.
        stats_header: optional list of stat column names.
        stats_dict: optional dict seq_id -> sequence of floats aligned with
            stats_header (required when stats_header is given).
        corr_data_dict: optional dict seq_id -> (spearman_r, spearman_p).
    """
    sorted_categories = sorted(output_dict.keys())
    header_cols = ['SeqID'] + sorted_categories
    if stats_header:
        header_cols.extend(stats_header)
    if corr_data_dict:
        header_cols.extend(['spearman_r', 'spearman_p'])
    # 'with' ensures the file is flushed and closed (the original never
    # closed its handle).
    with open(output_fp, 'w') as output_f:
        output_f.write('\t'.join(header_cols) + '\n')
        for otu in seq_ids:
            cols = [str(otu)]
            cols.extend('1' if otu in output_dict[cat] else '0'
                        for cat in sorted_categories)
            if stats_header:
                cols.extend('{0:.3f}'.format(stats_dict[otu][i])
                            for i in range(len(stats_header)))
            if corr_data_dict:
                cols.append('{0:.3f}'.format(corr_data_dict[otu][0]))
                cols.append('{0:.3f}'.format(corr_data_dict[otu][1]))
            output_f.write('\t'.join(cols) + '\n')
def main():
option_parser, opts, args = parse_command_line_parameters(**script_info)
otu_table_fp = opts.otu_table_fp
mothur_counts_fp = opts.mothur_counts_fp
mapping_fp = opts.mapping_fp
valid_state | s = opts.valid_states
blank_id_fp = opts.blank_id_fp
contaminant_db_fp = opts.contaminant_db_fp
contaminant_similarity = opts.contaminant_similarity
max_correlation = opts.max_correlation
correlate_header | = opts.correlat |
fter experimenting w/ disabling the USB reset.
from devil.utils import reset_usb # pylint: disable=unused-import
logger = logging.getLogger(__name__)
from py_utils import modules_util
# Script depends on features from psutil version 2.0 or higher.
modules_util.RequireVersion(psutil, '2.0')
def KillAllAdb():
  """Kills every running adb process, escalating SIGTERM -> SIGQUIT -> SIGKILL,
  then logs any adb process that survived all three signals."""

  def _adb_processes():
    """Yields (process, info) pairs for every process named 'adb'."""
    for proc in psutil.process_iter():
      try:
        # Retrieve all required process infos at once.
        info = proc.as_dict(attrs=['pid', 'name', 'cmdline'])
        if info['name'] == 'adb':
          info['cmdline'] = ' '.join(info['cmdline'])
          yield proc, info
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass

  for sig in [signal.SIGTERM, signal.SIGQUIT, signal.SIGKILL]:
    for proc, info in _adb_processes():
      try:
        info['signal'] = sig
        logger.info('kill %(signal)s %(pid)s (%(name)s [%(cmdline)s])', info)
        proc.send_signal(sig)
      except (psutil.NoSuchProcess, psutil.AccessDenied):
        pass
  # Anything still visible here could not be killed.
  for _, info in _adb_processes():
    try:
      logger.error('Unable to kill %(pid)s (%(name)s [%(cmdline)s])', info)
    except (psutil.NoSuchProcess, psutil.AccessDenied):
      pass
def TryAuth(device):
  """Uses anything in ~/.android/ that looks like a key to auth with the device.
  Args:
    device: The DeviceUtils device to attempt to auth.
  Returns:
    True if device successfully authed.
  """
  key_paths = glob.glob(os.path.join(adb_wrapper.ADB_HOST_KEYS_DIR, '*key'))
  if len(key_paths) <= 1:
    logger.warning('Only %d ADB keys available. Not forcing auth.',
                   len(key_paths))
    return False
  # Restart the adb server with every candidate key loaded.
  KillAllAdb()
  adb_wrapper.AdbWrapper.StartServer(keys=key_paths)
  state = device.adb.GetState()
  if state != 'device':
    logger.error('Auth failed. Device %s still stuck in %s.', str(device),
                 state)
    return False
  # Auth worked. Persist the host's default public ADB key on the device so
  # future connections succeed without the brute-force step above.
  default_pub_key = os.path.join(adb_wrapper.ADB_HOST_KEYS_DIR, 'adbkey.pub')
  if not os.path.exists(default_pub_key):  # This really shouldn't happen.
    logger.error('Default ADB key not available at %s.', default_pub_key)
    return False
  with open(default_pub_key) as key_file:
    pub_key_contents = key_file.read()
  try:
    device.WriteFile(adb_wrapper.ADB_KEYS_FILE, pub_key_contents, as_root=True)
  except (device_errors.CommandTimeoutError, device_errors.CommandFailedError,
          device_errors.DeviceUnreachableError):
    logger.exception('Unable to write default ADB key to %s.', str(device))
    return False
  return True
def RecoverDevice(device, denylist, should_reboot=lambda device: True):
  """Attempts to recover a single device.

  Steps, in order: skip denylisted devices; retry ADB auth for unauthorized
  devices; otherwise (when should_reboot allows) reboot the device and wait
  for it to come back, re-denylisting it on failure.

  Args:
    device: The DeviceUtils device to recover.
    denylist: Device denylist (may be None); extended on reboot failures.
    should_reboot: Callable deciding whether this device may be rebooted.
  """
  if device_status.IsDenylisted(device.adb.GetDeviceSerial(), denylist):
    logger.debug('%s is denylisted, skipping recovery.', str(device))
    return
  if device.adb.GetState() == 'unauthorized' and TryAuth(device):
    logger.info('Successfully authed device %s!', str(device))
    return
  if should_reboot(device):
    # Root is lost across a reboot; remember it so it can be restored after.
    should_restore_root = device.HasRoot()
    try:
      device.WaitUntilFullyBooted(retries=0)
    except (device_errors.CommandTimeoutError, device_errors.CommandFailedError,
            device_errors.DeviceUnreachableError):
      logger.exception(
          'Failure while waiting for %s. '
          'Attempting to recover.', str(device))
    try:
      try:
        device.Reboot(block=False, timeout=5, retries=0)
      except device_errors.CommandTimeoutError:
        logger.warning(
            'Timed out while attempting to reboot %s normally.'
            'Attempting alternative reboot.', str(device))
        # The device drops offline before we can grab the exit code, so
        # we don't check for status.
        try:
          device.adb.Root()
        finally:
          # We are already in a failure mode, attempt to reboot regardless of
          # what device.adb.Root() returns. If the sysrq reboot fails an
          # exception willbe thrown at that level.
          device.adb.Shell(
              'echo b > /proc/sysrq-trigger',
              expect_status=None,
              timeout=5,
              retries=0)
    except (device_errors.CommandFailedError,
            device_errors.DeviceUnreachableError):
      logger.exception('Failed to reboot %s.', str(device))
      if denylist:
        denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_failure')
    except device_errors.CommandTimeoutError:
      logger.exception('Timed out while rebooting %s.', str(device))
      if denylist:
        denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_timeout')
    # Wait for the rebooted device, restoring root if it had it before.
    try:
      device.WaitUntilFullyBooted(
          retries=0, timeout=device.REBOOT_DEFAULT_TIMEOUT)
      if should_restore_root:
        device.EnableRoot()
    except (device_errors.CommandFailedError,
            device_errors.DeviceUnreachableError):
      logger.exception('Failure while waiting for %s.', str(device))
      if denylist:
        denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_failure')
    except device_errors.CommandTimeoutError:
      logger.exception('Timed out while waiting for %s.', str(device))
      if denylist:
        denylist.Extend([device.adb.GetDeviceSerial()], reason='reboot_timeout')
def RecoverDevices(devices, denylist, enable_usb_reset=False):
  """Attempts to recover any inoperable devices in the provided list.
  Args:
    devices: The list of devices to attempt to recover.
    denylist: The current device denylist, which will be used then
      reset.
    enable_usb_reset: If True, actually perform USB bus resets for unhealthy
      devices; otherwise only log (see crbug.com/642194).
  """
  statuses = device_status.DeviceStatus(devices, denylist)
  # Devices with a bad USB status, or offline/missing from adb, get a USB reset.
  should_restart_usb = set(
      status['serial'] for status in statuses
      if (not status['usb_status'] or status['adb_status'] in ('offline',
                                                               'missing')))
  # Those devices plus unauthorized ones need the adb server restarted.
  should_restart_adb = should_restart_usb.union(
      set(status['serial'] for status in statuses
          if status['adb_status'] == 'unauthorized'))
  # USB-unhealthy and previously denylisted devices also get a reboot.
  should_reboot_device = should_restart_usb.union(
      set(status['serial'] for status in statuses if status['denylisted']))
  logger.debug('Should restart USB for:')
  for d in should_restart_usb:
    logger.debug(' %s', d)
  logger.debug('Should restart ADB for:')
  for d in should_restart_adb:
    logger.debug(' %s', d)
  logger.debug('Should reboot:')
  for d in should_reboot_device:
    logger.debug(' %s', d)
  # Clear the denylist up front; devices that fail below are re-added.
  if denylist:
    denylist.Reset()
  if should_restart_adb:
    KillAllAdb()
    adb_wrapper.AdbWrapper.StartServer()
  for serial in should_restart_usb:
    try:
      # TODO(crbug.com/642194): Resetting may be causing more harm
      # (specifically, kernel panics) than it does good.
      if enable_usb_reset:
        reset_usb.reset_android_usb(serial)
      else:
        logger.warning('USB reset disabled for %s (crbug.com/642914)', serial)
    except IOError:
      logger.exception('Unable to reset USB for %s.', serial)
      if denylist:
        denylist.Extend([serial], reason='USB failure')
    except device_errors.DeviceUnreachableError:
      logger.exception('Unable to reset USB for %s.', serial)
      if denylist:
        denylist.Extend([serial], reason='offline')
  # Recover every device in parallel; only the selected subset is rebooted.
  device_utils.DeviceUtils.parallel(devices).pMap(
      RecoverDevice,
      denylist,
      should_reboot=lambda device: device.serial in should_reboot_device)
def main():
parser = argparse.ArgumentParser()
logging_common.AddLoggingArguments(parser)
script_common.AddEnvironmentArguments(parser)
parser.add_argument('--denylist-file', help='Device denylist JSON file.')
parser.add_argument(
'--known-devices-file',
action='append',
default=[],
dest='known_devices_files',
help='Path to known device lists.')
parser.add_argument(
'--enable-usb-reset', action='store_true', help='Reset USB if necessary.')
args = parser.parse_args()
logging_common.InitializeLogging(args)
script_common.InitializeEnvironment(args)
denylist = (device_denylist.Denylist(args.denylist_file)
if args.denylist_file else None)
expected_devices = device_status.GetExpectedDevices(args.known_devices_files) |
# -*- coding: utf-8 -*-
#
# Copyright (C) 2018-2019 Matthias Klumpp <matthias@tenstral.net>
#
# Licensed under the GNU Lesser General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the license, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import os
import zmq
import json
import logging as log
from zmq.eventloop import ioloop, zmqstream
from laniakea.msgstream import verify_event_message, event_message_is_valid_and_signed
class EventsReceiver:
    '''
    Lighthouse module handling event stream submissions,
    registering them and publishing them to the world.
    '''
    def __init__(self, endpoint, pub_queue):
        '''Set up receiver state and load the trusted signing keys.

        :param endpoint: ZeroMQ endpoint string the ROUTER socket will bind to.
        :param pub_queue: queue receiving [tag, data] byte pairs to publish.
        '''
        # Local imports keep Laniakea config loading out of module import time.
        from glob import glob
        from laniakea.localconfig import LocalConfig
        from laniakea.msgstream import keyfile_read_verify_key
        self._socket = None
        self._ctx = zmq.Context.instance()
        self._pub_queue = pub_queue
        self._endpoint = endpoint
        # Maps signer id -> verify key, loaded from the trusted-keys directory.
        self._trusted_keys = {}
        # TODO: Implement auto-reloading of valid keys list if directory changes
        for keyfname in glob(os.path.join(LocalConfig().trusted_curve_keys_dir, '*')):
            signer_id, verify_key = keyfile_read_verify_key(keyfname)
            if signer_id and verify_key:
                self._trusted_keys[signer_id] = verify_key
    def _event_message_received(self, socket, msg):
        '''Validate, verify and forward one incoming event message.

        msg[1] carries the JSON payload (msg[0] is presumably the ROUTER
        identity frame - TODO confirm against the submitting client).
        '''
        data = str(msg[1], 'utf-8', 'replace')
        try:
            event = json.loads(data)
        except json.JSONDecodeError as e:
            # we ignore invalid requests
            log.info('Received invalid JSON message from sender: %s (%s)', data if len(data) > 1 else msg, str(e))
            return
        # check if the message is actually valid and can be processed
        if not event_message_is_valid_and_signed(event):
            # we currently just silently ignore invalid submissions
            return
        signatures = event.get('signatures')
        signature_checked = False
        for signer in signatures.keys():
            key = self._trusted_keys.get(signer)
            if not key:
                # unknown signer: skip this signature, try the next one
                continue
            try:
                # assume_valid=True: structural validity was checked above
                verify_event_message(signer, event, key, assume_valid=True)
            except Exception as e:
                # a trusted signer's signature failed -> reject the event
                log.info('Invalid signature on event ({}): {}'.format(str(e), str(event)))
                return
            # if we are here, we verified a signature without issues, which means
            # the message is legit and we can sign it ourselves and publish it
            signature_checked = True
        if not signature_checked:
            log.info('Unable to verify signature on event: {}'.format(str(event)))
            return
        # now publish the event to the world
        self._pub_queue.put([bytes(event['tag'], 'utf-8'),
                             bytes(data, 'utf-8')])
    def run(self):
        '''Bind the ROUTER socket and run the ZMQ IOLoop forever (blocking).'''
        if self._socket:
            log.warning('Tried to run an already running event receiver again.')
            return
        self._socket = self._ctx.socket(zmq.ROUTER)
        self._socket.bind(self._endpoint)
        server_stream = zmqstream.ZMQStream(self._socket)
        server_stream.on_recv_stream(self._event_message_received)
        ioloop.IOLoop.instance().start()
|
from __future__ import absolute_import
from collections import namedtuple
# Other useful structs
# Identifies a single partition of a single topic.
TopicPartition = namedtuple("TopicPartition",
    ["topic", "partition"])
# Static metadata describing one broker node in the cluster.
BrokerMetadata = namedtuple("BrokerMetadata",
    ["nodeId", "host", "port", "rack"])
# Per-partition leadership/replication state; 'error' carries the broker
# error code reported for this partition, if any.
PartitionMetadata = namedtuple("PartitionMetadata",
    ["topic", "partition", "leader", "replicas", "isr", "error"])
# Committed offset plus the free-form metadata string stored with it.
OffsetAndMetadata = namedtuple("OffsetAndMetadata",
    # TODO add leaderEpoch: OffsetAndMetadata(offset, leaderEpoch, metadata)
    ["offset", "metadata"])
# Result of an offsets-for-times lookup.
OffsetAndTimestamp = namedtuple("OffsetAndTimestamp",
    ["offset", "timestamp"])
# Define retry policy for async producer
# Limit value: int >= 0, 0 means no retries
RetryOptions = namedtuple("RetryOptions",
    ["limit", "backoff_ms", "retry_on_timeouts"])
|
import argparse
i | mport changeling_client.api
import changeling_client.commands
# Module-level CLI definition shared by main().
parser = argparse.ArgumentParser()
parser.add_argument('--endpoint', required=True)
# Each registered subcommand is expected to set 'func' on the parsed
# namespace, which main() then invokes.
subparsers = parser.add_subparsers()
changeling_client.commands.register(subparsers)
def main():
    """Parse CLI arguments and dispatch to the selected subcommand."""
    options = parser.parse_args()
    client = changeling_client.api.Service(options.endpoint)
    options.func(client, options)
|
ance of class.
@param version: Version must either be a string or a tuple of integers
or strings representing integers.
Version strings must begin with integer numbers separated by dots and
may end with any string.
"""
self._versionstr = '.'.join([str(v) for v in self])
    def __new__(cls, version):
        """Static method for creating a new instance which is a subclass of
        immutable tuple type. Versions are parsed and stored as a tuple of
        integers internally.
        @param cls:     Class
        @param version: Version must either be a string or a tuple of integers
                        or strings representing integers.
                        Version strings must begin with integer numbers
                        separated by dots and may end with any string.
        """
        # NOTE: 'basestring' implies this module targets Python 2.
        if isinstance(version, basestring):
            mobj = re.match('(?P<version>\d+(\.\d+)*)(?P<suffix>.*)$', version)
            if mobj:
                # Only the leading dotted-integer part is kept in the tuple;
                # any trailing suffix (e.g. 'rc1') is discarded here.
                version = [int(i) for i in mobj.groupdict()['version'].split('.')]
                return tuple.__new__(cls, version)
            else:
                raise ValueError('Invalid version string format.')
        else:
            try:
                return tuple.__new__(cls, [int(v) for v in version])
            except:
                # NOTE(review): bare except also masks KeyboardInterrupt etc.;
                # narrowing to (TypeError, ValueError) looks safe - confirm.
                raise TypeError("Version must either be a string or an iterable"
                                " of integers.")
def __str__(self):
| """Returns string representation of version.
"""
return self._versionstr
class TableFilter:
    """Class for filtering rows of tables based on filters on values of columns.
    The tables are represented as nested lists (list of lists of columns.)
    """
    def __init__(self):
        """Initialize Filter."""
        # Maps column name -> (patterns, is_regex, ignore_case).
        self._filters = {}
    def registerFilter(self, column, patterns, is_regex=False,
                       ignore_case=False):
        """Register filter on a column of table.
        @param column:      The column name.
        @param patterns:    A single pattern or a list of patterns used for
                            matching column values.
        @param is_regex:    The patterns will be treated as regex if True, the
                            column values will be tested for equality with the
                            patterns otherwise.
        @param ignore_case: Case insensitive matching will be used if True.
        """
        # Normalize the patterns argument to a sequence.
        if isinstance(patterns, basestring):
            patt_list = (patterns,)
        elif isinstance(patterns, (tuple, list)):
            patt_list = list(patterns)
        else:
            raise ValueError("The patterns parameter must either be as string "
                             "or a tuple / list of strings.")
        if is_regex:
            # Pre-compile the regexes once at registration time.
            if ignore_case:
                flags = re.IGNORECASE
            else:
                flags = 0
            patt_exprs = [re.compile(pattern, flags) for pattern in patt_list]
        else:
            # For equality matching with ignore_case, patterns are lowered
            # here and the column values are lowered in applyFilters.
            if ignore_case:
                patt_exprs = [pattern.lower() for pattern in patt_list]
            else:
                patt_exprs = patt_list
        self._filters[column] = (patt_exprs, is_regex, ignore_case)
    def unregisterFilter(self, column):
        """Unregister filter on a column of the table.
        @param column: The column header.
        """
        # NOTE: dict.has_key is Python 2 only.
        if self._filters.has_key(column):
            del self._filters[column]
    def registerFilters(self, **kwargs):
        """Register multiple filters at once.
        @param **kwargs: Multiple filters are registered using keyword
                         variables. Each keyword must correspond to a field name
                         with an optional suffix:
                         field:          Field equal to value or in list of
                                         values.
                         field_ic:       Field equal to value or in list of
                                         values, using case insensitive
                                         comparison.
                         field_regex:    Field matches regex value or matches
                                         with any regex in list of values.
                         field_ic_regex: Field matches regex value or matches
                                         with any regex in list of values
                                         using case insensitive match.
        """
        for (key, patterns) in kwargs.items():
            # Strip the optional '_regex' then '_ic' suffixes to recover the
            # column name and matching options.
            if key.endswith('_regex'):
                col = key[:-len('_regex')]
                is_regex = True
            else:
                col = key
                is_regex = False
            if col.endswith('_ic'):
                col = col[:-len('_ic')]
                ignore_case = True
            else:
                ignore_case = False
            self.registerFilter(col, patterns, is_regex, ignore_case)
    def applyFilters(self, headers, table):
        """Apply filter on ps command result.
        @param headers: List of column headers.
        @param table:   Nested list of rows and columns.
        @return:        Nested list of rows and columns filtered using
                        registered filters.
        """
        result = []
        # Resolve each filtered column name to its index once up front.
        column_idxs = {}
        for column in self._filters.keys():
            try:
                column_idxs[column] = headers.index(column)
            except ValueError:
                raise ValueError('Invalid column name %s in filter.' % column)
        for row in table:
            # A row is kept only if it matches EVERY registered filter.
            # The for/else constructs implement that conjunction: breaking
            # out of this loop rejects the row, falling off the end (else)
            # accepts it.
            for (column, (patterns,
                          is_regex,
                          ignore_case)) in self._filters.items():
                col_idx = column_idxs[column]
                col_val = row[col_idx]
                if is_regex:
                    # Accept on the first matching regex; if none matched,
                    # the inner for/else breaks out and rejects the row.
                    for pattern in patterns:
                        if pattern.search(col_val):
                            break
                    else:
                        break
                else:
                    if ignore_case:
                        col_val = col_val.lower()
                    if col_val in patterns:
                        pass
                    else:
                        break
            else:
                result.append(row)
        return result
class Telnet(telnetlib.Telnet):
__doc__ = telnetlib.Telnet.__doc__
def __init__(self, host=None, port=0, socket_file=None,
timeout=socket.getdefaulttimeout()):
"""Constructor.
When called without arguments, create an unconnected instance.
With a host argument, it connects the instance using TCP; port number
and timeout are optional, socket_file must be None.
With a socket_file argument, it connects the instance using
named socket; timeout is optional and host must be None.
"""
telnetlib.Telnet.__init__(self, timeout=timeout)
if host is not None or socket_file is not None:
self.open(host, port, socket_file, timeout=timeout)
def open(self, host=None, port=0, socket_file=None,
timeout=socket.getdefaulttimeout()):
"""Connect to a host.
With a host argument, it connects the instance using TCP; port number
and timeout are optional, socket_file must be None. The port number
defaults to the standard telnet port (23).
With a socket_file argument, it connects the instance using
named socket; timeout is optional and host must be None.
Don't try to reopen an already connected instance.
"""
self.socket_file = socket_file
if host is not None:
if sys.version_info[:2] >= (2,6):
telnetlib.Telnet.open(self, host, port, timeout)
else:
telnetlib.Telnet.open(self, host, port)
elif s |
"""EAS two-devices turn
Revision ID: 17dc9c049f8b
Revises: ad7b856bcc0
Create Date: 2014-10-21 20:38:14.311747
"""
# revision identifiers, used by Alembic.
revision = '17dc9c049f8b'
down_revision = 'ad7b856bcc0'
from datetime import datetime
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
    """Create primary/secondary EASDevice rows for every EAS account and
    repoint existing easfoldersyncstatus/easuid rows at the secondary device.
    No-op on deployments without the EAS tables."""
    from inbox.ignition import main_engine
    engine = main_engine()
    if not engine.has_table('easaccount'):
        return
    from inbox.models.session import session_scope
    # Reflect the live schema instead of importing possibly-drifted models.
    Base = sa.ext.declarative.declarative_base()
    Base.metadata.reflect(engine)
    class EASAccount(Base):
        __table__ = Base.metadata.tables['easaccount']
        # Relationships resolve only non-deleted devices.
        primary_device = sa.orm.relationship(
            'EASDevice', primaryjoin='and_(EASAccount.primary_device_id == EASDevice.id, '
            'EASDevice.deleted_at.is_(None))', uselist=False)
        secondary_device = sa.orm.relationship(
            'EASDevice', primaryjoin='and_(EASAccount.secondary_device_id == EASDevice.id, '
            'EASDevice.deleted_at.is_(None))', uselist=False)
    class EASDevice(Base):
        __table__ = Base.metadata.tables['easdevice']
    with session_scope(versioned=False) as \
            db_session:
        accts = db_session.query(EASAccount).all()
        for a in accts:
            # Set both to filtered=False, //needed// for correct deploy.
            primary = EASDevice(created_at=datetime.utcnow(),
                                updated_at=datetime.utcnow(),
                                filtered=False,
                                eas_device_id=a._eas_device_id,
                                eas_device_type=a._eas_device_type,
                                eas_policy_key=a.eas_policy_key,
                                eas_sync_key=a.eas_account_sync_key)
            secondary = EASDevice(created_at=datetime.utcnow(),
                                  updated_at=datetime.utcnow(),
                                  filtered=False,
                                  eas_device_id=a._eas_device_id,
                                  eas_device_type=a._eas_device_type,
                                  eas_policy_key=a.eas_policy_key,
                                  eas_sync_key=a.eas_account_sync_key)
            a.primary_device = primary
            a.secondary_device = secondary
            db_session.add(a)
        db_session.commit()
    conn = op.get_bind()
    # Map each account id to its freshly-created secondary device id.
    acct_device_map = dict(
        (id_, device_id) for id_, device_id in conn.execute(text(
            """SELECT id, secondary_device_id from easaccount""")))
    # NOTE: Python 2 print statement / dict.iteritems below.
    print 'acct_device_map: ', acct_device_map
    for acct_id, device_id in acct_device_map.iteritems():
        conn.execute(text("""
            UPDATE easfoldersyncstatus
            SET device_id=:device_id
            WHERE account_id=:acct_id
            """), device_id=device_id, acct_id=acct_id)
        conn.execute(text("""
            UPDATE easuid
            SET device_id=:device_id
            WHERE easaccount_id=:acct_id
            """), device_id=device_id, acct_id=acct_id)
def downgrade():
    """Downgrade is unsupported: the created device rows and repointed
    foreign keys cannot be safely undone.

    Raises:
        Exception: always; the original raised a bare '!' which told the
            operator nothing about why the downgrade refused to run.
    """
    raise Exception('This migration is irreversible and cannot be downgraded.')
|
import asyncio
import pip
import pickle
import os.path
try:
import discord
except:
pip.main(['install', 'git+https://github.com/Rapptz/discord.py@async#egg=discord.py'])
try:
import win_unicode_console
except:
0
try:
from lxml import html
except ImportError:
pip.main(['install', 'lxml'])
try:
from bs4 import BeautifulSoup
except ImportError:
pip.main(['install', 'BeautifulSoup4'])
from config import Config
from kissanimeConnector import KissDownloader
class User(object):
    """A registered Discord user and their tracked bookmark-list URLs."""

    def __init__(self, userId, userUrl):
        # Bare discord.User carrying only the id; used as a message target.
        discord_user = discord.User()
        discord_user.id = userId
        self.discordUser = discord_user
        self.id = userId
        self.kissUrl = userUrl
        self.malUrl = ''
        self.ttsChannel = ''
class AnimeBot(discord.Client):
    """Discord bot that polls users' KissAnime bookmark lists and announces
    newly aired episodes via direct message (and an optional TTS channel)."""
    def __init__(self, config_file='config/options.txt', user_file='config/users.txt'):
        """Load bot configuration and the pickled list of registered users.
        @param config_file: path to the options file read by Config.
        @param user_file:   path to the pickled list of User objects.
        """
        super().__init__()
        self.config = Config(config_file)
        self.kissAnime = KissDownloader()
        self.users = []
        if os.path.isfile(user_file):
            # NOTE(review): pickle.load on a local file; assumes the file is
            # trusted (written only by this bot).
            with open(user_file, 'rb') as file:
                self.users = pickle.load(file)
    def run(self):
        """Start the Discord client with the configured credentials (blocks)."""
        return super().run(self.config.username, self.config.password)
    async def event_loop(self):
        """Poll every registered user's bookmarks forever, every 300 seconds."""
        await asyncio.sleep(1)
        while True:
            for user in self.users:
                try:
                    await self.check_for_user(user)
                except Exception as e:
                    # Best effort: one failing user must not stop the loop.
                    print(e)
                    print('Could not check updates for %s' % user.id)
            await asyncio.sleep(300)
    async def on_ready(self):
        """Log connection details, set presence, and start the polling loop."""
        try:
            win_unicode_console.enable()
        except:
            0  # no-op: win_unicode_console is optional (Windows console only)
        await self.change_status(game=discord.Game(name='with Eruru\'s tail'))
        print('Connected!\n')
        print('Username: %s' % self.user.name)
        print('Bot ID: %s' % self.user.id)
        print()
        print('Command prefix is %s'% self.config.command_prefix)
        print()
        print('--Connected Servers List--')
        if self.servers:
            [print(s) for s in self.servers]
        else:
            print('No servers have been joined yet.')
        print()
        print('--Users Registered--')
        if len(self.users) > 0:
            for user in self.users:
                print(user.id + ' - ' + user.kissUrl)
        else:
            print('No users have registered yet.')
        print()
        print('--Log--')
        handler = getattr(self, 'event_loop', None)
        await handler()
    async def on_message(self, message):
        """Handle private '<prefix>register <url|id>' and '<prefix>settts'."""
        if (message.channel.is_private) and (message.author != self.user) and (message.content.startswith(self.config.command_prefix)):
            # Split '<prefix>command data' into its two halves.
            command = message.content[:message.content.find(' ')].replace(self.config.command_prefix, '')
            data = message.content[message.content.find(' ')+1:]
            if command == 'register':
                if data.startswith('https://kissanime.to/MyList/'):
                    self.handle_register_user(message.author.id, data)
                elif data.isdigit():
                    self.handle_register_user(message.author.id, 'https://kissanime.to/MyList/' + data)
            if command == 'settts':
                self.handle_set_tts(message.author.id, data)
    def handle_register_user(self, userId, userUrl):
        """Add a new user (or update an existing one's URL) and persist."""
        if self.get_user(userId) == 0:
            user = User(userId, userUrl)
            self.users.append(user)
            print('Added user \'%s\' with url \'%s\'' % (userId, userUrl))
        else:
            # NOTE(review): sets 'userUrl', but checks read 'kissUrl' -
            # updating an existing user may have no effect; confirm.
            self.get_user(userId).userUrl = userUrl
            print('Updated bookmark url for user \'%s\'' % userId)
        with open('config/users.txt', 'wb') as file:
            pickle.dump(self.users, file)
    def handle_set_tts(self, userId, channel):
        """Store 'serverId/channelId' as the user's TTS target and persist."""
        user = self.get_user(userId)
        if not user == 0:
            user.ttsChannel = channel
            print('Updated tts channel for \'%s\'' % userId)
            with open('config/users.txt', 'wb') as file:
                pickle.dump(self.users, file)
    def get_user(self, userId):
        """Return the registered User with this id, or 0 if unknown."""
        for user in self.users:
            if user.id == userId:
                return user
        return 0
    async def check_for_user(self, user):
        """Diff the user's bookmark list against the cached copy and announce
        any bookmark whose latest episode number changed."""
        print('Checking bookmarks for \'%s\'...' % user.id)
        cachedFilePath = 'cache/%s.dat' % user.id
        kissDomain = 'https://kissanime.to'
        # Placeholder so ':' in titles survives the 'key: value' cache format.
        colonId = '(*:*)'
        # Download the users bookmark page
        if os.path.isfile('bookmarkpage.html'):
            # Debug shortcut: use a locally saved page when one is present.
            with open('bookmarkpage.html', 'r') as file:
                bookmarkPage = file.read()
        else:
            bookmarkPage = self.kissAnime.downloadPage(user.kissUrl).replace('\\r\\n', '')
            #with open('bookmarkpage.html', 'w') as file:
            #    file.write(bookmarkPage)
        # Turn the page into a list
        newList = self.kiss_list_from_bookmarks(bookmarkPage)
        # Load the old list from the file
        oldList = {}
        if os.path.isfile(cachedFilePath):
            for line in open(cachedFilePath, 'r'):
                try:
                    key, value = line.strip().split(': ')
                    key = key.replace(colonId, ':')
                    oldList[key] = tuple(value.replace('\'', '').replace('(', '').replace(')', '').split(', '))
                except:
                    0 #best code evah
        # Compare the lists and send messages
        for key, newValue in newList.items():
            try:
                oldValue = oldList[key]
            except:
                # Unknown title: seed with the current episode so no ping fires.
                oldValue = (newValue[0], '')
            if oldValue[0] != newValue[0]:
                await self.send_message(user.discordUser, 'The anime **%s** has just aired episode %s!\n%s' % (key, newValue[0], kissDomain + newValue[1]))
                if (user.ttsChannel) and not (user.ttsChannel == ''):
                    channel = self.get_channel_class(user.ttsChannel.split('/')[0], user.ttsChannel.split('/')[1])
                    if not channel == 0:
                        # 'any may' is presumably phonetic spelling of 'anime'
                        # for the TTS engine - do not "fix" this string.
                        message = 'The any may %s has just aired episode %s!' % (key.replace('.', '').replace('!', '').replace(',', '').replace(':', '').replace(';', ''), newValue[0])
                        await self.send_message(channel, message, tts=True)
        # Save the new list into the file
        with open(cachedFilePath, 'w') as file:
            for key, value in newList.items():
                file.write('%s: %s\n' % (key.replace(':', colonId), value))
        print('Done checking bookmarks for \'%s\'!' % user.id)
    def get_channel_class(self, serverId, channelId):
        """Return the channel object for server/channel ids, or 0 if absent."""
        for server in self.servers:
            if server.id == serverId:
                for channel in server.channels:
                    if channel.id == channelId:
                        return channel
        return 0
    def kiss_list_from_bookmarks(self, content):
        """Parse the bookmark page HTML into {title: (episode, link)}."""
        dataList = {}
        # Crude pre-slicing narrows the HTML before BeautifulSoup parses rows.
        table = content[content.find('<table class="listing">'):content.find('</table>')]
        table = table[table.find('<tr class="trAnime'):table.find('</tbody>')]
        rows = table.split('</tr>')
        del rows[-1]
        for row in rows:
            try:
                row += '</tr>'
                soup = BeautifulSoup(row, 'html.parser')
                key = soup.find_all('a')[1].string.strip()
                episode = soup.find_all('a')[2].string.replace('Episode', '').replace('(', '[').replace(')', ']').strip()
                link = soup.find_all('a')[1].get('href')
                dataList[key] = (episode, link)
            except:
                0  # no-op: silently skip malformed rows
        return dataList
    def kiss_latest_episode(self, content):
        """Return the last 3 chars of the first link text in the page's table
        - presumably the newest episode number; verify against callers."""
        bowl = BeautifulSoup(content, 'html.parser').table
        soup = BeautifulSoup(str(bowl), 'html.parser')
        episode = soup.find_all('a')[0].string[-3:]
        return episode
if __name__ == '__main__':
    # Entry point: construct the bot and block on the Discord event loop.
    bot = AnimeBot()
    bot.run()
|
'''trec_dd.* namespace package can have several subpackages, see
http://github.com/trec-dd for more info
.. This software is released under an MIT/X11 open source license.
Copyright 2015 Diffeo, Inc.
'''
im | port pkg_resources
pkg_resou | rces.declare_namespace(__name__)
|
with a too long section of type waiting
"""
# early | returns
# if there is no transfer it won't have any waiting sections
if journey.nb_transfers == 0:
return True
if journey.duration < self.max_waiting_duration:
return True
for s in journey.sections:
if s.type != response_pb2.WAITING:
continue
if s.duration < self.max_waiting_duration:
| continue
return False
return True
class FilterMaxSuccessivePhysicalMode(SingleJourneyFilter):
    """Rejects journeys chaining too many consecutive public-transport
    sections of one specific physical mode (used for STIF buses)."""

    message = 'too_much_successive_section_same_physical_mode'

    def __init__(self, successive_physical_mode_to_limit_id='physical_mode:Bus', max_successive_physical_mode=3):
        self.successive_physical_mode_to_limit_id = successive_physical_mode_to_limit_id
        self.max_successive_physical_mode = max_successive_physical_mode

    def filter_func(self, journey):
        """Return False when more than max_successive_physical_mode
        consecutive PT sections use the limited physical mode."""
        consecutive = 0
        for section in journey.sections:
            # Only public-transport sections participate in the count.
            if section.type != response_pb2.PUBLIC_TRANSPORT:
                continue
            mode_uri = section.pt_display_informations.uris.physical_mode
            if mode_uri == self.successive_physical_mode_to_limit_id:
                consecutive += 1
            elif consecutive <= self.max_successive_physical_mode:
                # A different mode breaks the streak, unless the limit was
                # already exceeded (then the count is kept to reject below).
                consecutive = 0
            if consecutive > self.max_successive_physical_mode:
                return False
        return True
class FilterMinTransfers(SingleJourneyFilter):
    """Rejects journeys with fewer connections than a minimum."""

    message = 'not_enough_connections'

    def __init__(self, min_nb_transfers=0):
        self.min_nb_transfers = min_nb_transfers

    def filter_func(self, journey):
        """Keep only journeys with at least min_nb_transfers connections."""
        return get_nb_connections(journey) >= self.min_nb_transfers
class FilterDirectPath(SingleJourneyFilter):
    """Applies the direct-path parameter (none / only / indifferent)."""

    message = 'direct_path_parameter'

    def __init__(self, dp='indifferent'):
        self.dp = dp

    def filter_func(self, journey):
        """Keep journeys matching the direct-path parameter; 'non_pt' in the
        journey tags marks a direct (non-public-transport) path."""
        is_direct = 'non_pt' in journey.tags
        if self.dp == 'none':
            return not is_direct
        if self.dp == 'only':
            return is_direct
        # 'indifferent' (or any other value): keep everything.
        return True
class FilterDirectPathMode(SingleJourneyFilter):
    """Drop direct-path journeys whose mode is not in the allowed mode list."""

    message = 'direct_path_mode_parameter'

    def __init__(self, dp_mode):
        self.dp_mode = [] if dp_mode is None else dp_mode

    def filter_func(self, journey):
        """
        eliminates journeys that are not matching direct path mode parameter
        """
        if 'non_pt' not in journey.tags:
            # only direct paths are concerned by this filter
            return True
        # keep the direct path only if one of its tags is an allowed mode
        return any(tag in self.dp_mode for tag in journey.tags)
class FilterTooLongDirectPath(SingleJourneyFilter):
    """Drop direct-path journeys that exceed the per-mode maximum duration."""

    message = 'too_long_direct_path'

    def __init__(self, instance, request):
        self.instance = instance
        self.request = request
        self.logger = logging.getLogger(__name__)

    def _get_mode_of_journey(self, journey):
        # a valid direct path is tagged with exactly one fallback mode
        modes = FallbackModes.modes_str() & set(journey.tags)
        if len(modes) != 1:
            self.logger.error('Cannot determine the mode of direct path: {}'.format(modes))
            return None
        return next(iter(modes))

    def filter_func(self, journey):
        """
        eliminates too long direct_path journey
        """
        # we filter only direct path
        if 'non_pt' not in journey.tags:
            return True
        mode = self._get_mode_of_journey(journey)
        max_duration = self.request['max_{}_direct_path_duration'.format(mode)]
        return max_duration > journey.duration
def get_best_pt_journey_connections(journeys, request):
    """
    Returns the nb of connection of the best pt_journey
    Returns None if journeys empty
    """
    if not journeys:
        return None
    pt_only = (j for j in journeys if 'non_pt' not in j.tags)
    best = get_ASAP_journey(pt_only, request)
    if not best:
        return None
    return get_nb_connections(best)
def get_nb_connections(journey):
    """Return the number of connections (transfers) in *journey*."""
    nb_transfers = journey.nb_transfers
    return nb_transfers
def get_min_waiting(journey):
    """Return the shortest WAITING-section duration of *journey* (0 when none)."""
    waiting_durations = (s.duration for s in journey.sections if s.type == response_pb2.WAITING)
    return portable_min(waiting_durations, default=0)
def is_walk_after_parking(journey, idx_section):
    """
    True if section at given index is a walking after/before parking car/bss, False otherwise
    """
    park_types = {
        response_pb2.PARK,
        response_pb2.LEAVE_PARKING,
        response_pb2.BSS_PUT_BACK,
        response_pb2.BSS_RENT,
    }

    def _is_park(section):
        return section.type in park_types

    sections = journey.sections
    current = sections[idx_section]
    # only walking street-network sections can qualify
    if current.type != response_pb2.STREET_NETWORK or current.street_network.mode != response_pb2.Walking:
        return False
    # qualify when either neighbour is a parking-related section
    if idx_section > 0 and _is_park(sections[idx_section - 1]):
        return True
    if idx_section + 1 < len(sections) and _is_park(sections[idx_section + 1]):
        return True
    return False
def similar_pt_section_vj(section):
    """Similarity key built from the section's vehicle journey uri."""
    return 'pt:{}'.format(section.pt_display_informations.uris.vehicle_journey)
def similar_pt_section_line(section):
    """Similarity key built from the section's line uri."""
    return 'pt:%s' % section.pt_display_informations.uris.line
def _sn_functor(s):
return 'sn:{}'.format(s.street_network.mode)
def _crow_fly_sn_functor(_s):
return "crow_fly"
def similar_journeys_generator(journey, pt_functor, sn_functor=_sn_functor, crow_fly_functor=_sn_functor):
    """
    Yield one similarity key per relevant section of *journey*.

    Public-transport sections are keyed by *pt_functor*, street-network
    sections by *sn_functor*, crow-fly sections by *crow_fly_functor*.
    Walking sections adjacent to a parking section are skipped.
    Non-pt (direct path) journeys get one key per section, built from
    the mode and the section type.
    """
    def _similar_non_pt():
        for s in journey.sections:
            yield "sn:{} type:{}".format(s.street_network.mode, s.type)

    def _similar_pt():
        for idx, s in enumerate(journey.sections):
            if s.type == response_pb2.PUBLIC_TRANSPORT:
                yield pt_functor(s)
            elif s.type == response_pb2.STREET_NETWORK and is_walk_after_parking(journey, idx):
                continue
            # BUGFIX: compare enum values with '==' instead of 'is' -- identity
            # comparison on integer enum values only works by interning accident
            elif s.type == response_pb2.STREET_NETWORK:
                yield sn_functor(s)
            elif s.type == response_pb2.CROW_FLY:
                yield crow_fly_functor(s)

    if 'non_pt' in journey.tags:
        return _similar_non_pt()
    return _similar_pt()
def detailed_pt_section_vj(section):
    """Detailed similarity key: vehicle journey plus departure/arrival datetimes."""
    info = section.pt_display_informations
    return 'pt:{vj} dep:{dep} arr:{arr}'.format(
        vj=info.uris.vehicle_journey,
        dep=section.begin_date_time,
        arr=section.end_date_time,
    )
def bss_walking_sn_functor(s):
    """Similarity key that treats bss and walking fallbacks as the same mode."""
    mode = s.street_network.mode
    if mode in (FallbackModes.walking.value, FallbackModes.bss.value):
        mode = FallbackModes.walking.value
    return "d: {} m: {}".format(s.duration, mode)
def similar_bss_walking_vj_generator(journey):
    """Keys: detailed vehicle-journey info, with bss and walking fallback merged."""
    keys = similar_journeys_generator(journey, detailed_pt_section_vj, bss_walking_sn_functor)
    for key in keys:
        yield key
def similar_journeys_vj_generator(journey):
    """Keys: pt sections keyed by their vehicle journey."""
    keys = similar_journeys_generator(journey, similar_pt_section_vj)
    for key in keys:
        yield key
def similar_journeys_line_generator(journey):
    """Keys: pt sections keyed by their line."""
    keys = similar_journeys_generator(journey, similar_pt_section_line)
    for key in keys:
        yield key
def similar_journeys_line_and_crowfly_generator(journey):
    """Keys: pt sections keyed by line; every crow-fly section shares one key."""
    keys = similar_journeys_generator(journey, similar_pt_section_line, crow_fly_functor=_crow_fly_sn_functor)
    for key in keys:
        yield key
def shared_section_generator(journey):
"""
Definition of journeys with a shared section:
- same stop point of departure and arrival
- same number of sections in the journey
"""
# Early return: test if the journeys have the same number of sections
yield len(journey. |
from django import forms
from depotwork.questions.models import Question, Answer
class QuestionForm(forms.ModelForm):
    """ModelForm for asking a question: title, description and optional tags."""
    # Single-line title with Bootstrap styling, capped at 255 chars.
    title = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),
                            max_length=255)
    # Multi-line question body.
    description = forms.CharField(widget=forms.Textarea(attrs={'class':'form-control'}),
                                  max_length=2000)
    # Optional space-separated tag list (parsed elsewhere).
    tags = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),
                           max_length=255,
                           required=False,
                           help_text='Use spaces to separate the tags, such as "asp.net mvc5 javascript"')

    class Meta:
        model = Question
        fields = ['title', 'description', 'tags']
class AnswerForm(forms.ModelForm):
    """ModelForm for answering a question; the question id travels as a hidden field."""
    # Hidden FK back to the Question being answered.
    question = forms.ModelChoiceField(widget=forms.HiddenInput(), queryset=Question.objects.all())
    # Multi-line answer body.
    description = forms.CharField(widget=forms.Textarea(attrs={'class':'form-control', 'rows':'4'}),
                                  max_length=2000)

    class Meta:
        model = Answer
        fields = ['question', 'description']
"""
==========================================
Author: Tyler Brockett
Username: /u/tylerbrockett
Description: Alert Bot (Formerly sales__bot)
Date Created: 11/13/2015
Date Last Edited: 12/20/2016
Version: v2.0
==========================================
"""
import praw
import traceback
from utils.logger import Logger
from utils.color import Color
from utils import output
from prawcore.exceptions import Redirect
from prawcore.exceptions import Forbidden
class RedditHandler:
    """Thin wrapper around a praw.Reddit client for the alert bot."""

    def __init__(self, credentials):
        output.startup_message(credentials)
        self.credentials = credentials
        self.reddit = self.connect()
        # Default number of new posts fetched per subreddit.
        self.NUM_POSTS = 20

    def connect(self):
        """Create and return an authenticated praw.Reddit client."""
        try:
            reddit = praw.Reddit(
                client_id=self.credentials['client_id'],
                client_secret=self.credentials['client_secret'],
                password=self.credentials['password'],
                user_agent=self.credentials['user_agent'],
                username=self.credentials['username'])
            return reddit
        # BUGFIX: narrow bare 'except:' so KeyboardInterrupt/SystemExit escape
        except Exception:
            raise RedditHelperException('Error connecting to Reddit\n\n' + traceback.format_exc())

    def disconnect(self):
        """Drop the current Reddit client."""
        self.reddit = None

    def reset(self):
        """Tear down and re-establish the Reddit connection."""
        try:
            self.disconnect()
            self.reddit = self.connect()
        # BUGFIX: narrowed from bare 'except:'
        except Exception:
            raise RedditHelperException(RedditHelperException.RESET_EXCEPTION + '\n\n' + traceback.format_exc())

    def get_instance(self):
        """Return the underlying praw.Reddit client."""
        return self.reddit

    def get_unread(self):
        """Return all unread inbox messages, oldest first."""
        ret = []
        unread = self.reddit.inbox.unread(limit=None)
        for message in unread:
            ret.append(message)
        ret.reverse()
        return ret

    def get_message(self, message_id):
        """Fetch a single inbox message by its id."""
        return self.reddit.inbox.message(message_id)

    def send_message(self, redditor, subject, body):
        """Send a private message; wraps any failure in RedditHelperException."""
        try:
            self.reddit.redditor(redditor).message(subject, body)
        # BUGFIX: narrowed from bare 'except:'
        except Exception:
            Logger.log(traceback.format_exc(), Color.RED)
            raise RedditHelperException(RedditHelperException.SEND_MESSAGE_EXCEPTION)

    def get_submissions(self, subreddit):
        """Return the newest submissions of *subreddit* ([] when access is forbidden)."""
        submissions = []
        posts = 200 if (subreddit == 'all') else self.NUM_POSTS
        try:
            subs = self.reddit.subreddit(subreddit).new(limit=posts)
            for submission in subs:
                submissions.append(submission)
        except Forbidden:
            # Private/quarantined subreddit: log and treat as empty, don't fail.
            Logger.log(traceback.format_exc(), Color.RED)
            return []
        except Exception:
            Logger.log(traceback.format_exc(), Color.RED)
            raise RedditHelperException(RedditHelperException.GET_SUBMISSIONS_EXCEPTION)
        return submissions

    def get_original_message_id(self, received_message, database):
        """Walk up the reply chain to the message that created the subscription."""
        message = received_message
        while message.parent_id and len(database.get_subscriptions_by_message_id(str(message.author), message.id)) == 0:
            # parent_id is prefixed with a 't4_'-style type tag; strip it
            message = self.reddit.inbox.message(message.parent_id[3:])
        return message.id

    def check_invalid_subreddits(self, subreddits):
        """Return the subset of *subreddits* that do not exist (request redirects)."""
        invalid = []
        for subreddit in subreddits:
            try:
                for submission in self.reddit.subreddit(subreddit).new(limit=1):
                    print('subreddit is valid')
            except Redirect:  # was praw.errors.InvalidSubreddit without 'len()' around call in the try block
                Logger.log(traceback.format_exc(), Color.RED)
                invalid.append(subreddit)
        return invalid
class RedditHelperException(Exception):
    """Domain exception raised by RedditHandler; the message gets a fixed prefix."""

    SEND_MESSAGE_EXCEPTION = 'Error sending message'
    RESET_EXCEPTION = 'Error resetting connection to Reddit'
    GET_SUBMISSIONS_EXCEPTION = 'Error getting submissions'

    def __init__(self, error_args):
        super(RedditHelperException, self).__init__('Reddit Exception: {0}'.format(error_args))
        self.errorArgs = error_args
|
#!/usr/bin/python2
import collections
import os
from loranode import RN2483Controller
# from ../_examplify.py import Examplify
import os
os.sys.path.append(os.path.dirname(os.path.abspath('.')))
from _examplify import Examplify
import lora, pmt, osmosdr
from gnuradio import gr, blocks
class ReceiveWhitening:
    """Runs a GNU Radio flowgraph that decodes a captured LoRa cfile and
    appends the whitening sequence dumped by the receiver to a CSV file."""

    def __init__(self, sf = 7, output_file = './test_out.csv'):
        self.target_freq = 868.1e6
        self.sf = sf
        self.samp_rate = 1e6
        self.capture_freq = 868.0e6
        # Offset between the capture centre frequency and the target channel.
        self.offset = -(self.capture_freq - self.target_freq)
        self.inputFile = './'
        self.outputFile = output_file
        # Path where the lora receiver block dumps the whitening sequence.
        self.tempFile = '/tmp/whitening_out'
        self.tb = None

    def captureSequence(self, inputFile):
        """Decode *inputFile* through the LoRa receiver and append the
        resulting whitening sequence to self.outputFile.

        Raises Exception when the input, temp or output file is missing.
        """
        self.inputFile = inputFile
        # Guard clauses replace the original nested if/else pyramid.
        if not os.path.isfile(self.inputFile):
            raise Exception("[ReceiveWhitening] Inputfile '" + self.inputFile + "' does not exist!")
        self.tb = gr.top_block()
        self.file_source = blocks.file_source(gr.sizeof_gr_complex*1, self.inputFile, False)  # Repeat input: True/False
        self.lora_lora_receiver_0 = lora.lora_receiver(self.samp_rate, self.capture_freq, self.offset, self.sf, self.samp_rate)
        self.blocks_throttle_0 = blocks.throttle(gr.sizeof_gr_complex*1, self.samp_rate, True)
        self.tb.connect( (self.file_source, 0), (self.blocks_throttle_0, 0))
        self.tb.connect( (self.blocks_throttle_0, 0), (self.lora_lora_receiver_0, 0))
        self.tb.run()
        self.tb = None
        if not os.path.isfile(self.tempFile):
            raise Exception("[ReceiveWhitening] Tempfile '" + self.tempFile + "' does not exist!")
        if not os.path.isfile(self.outputFile):
            raise Exception("[ReceiveWhitening] Outputfile '" + self.outputFile + "' does not exist!")
        # BUGFIX: use context managers so handles are closed even on error.
        with open(self.tempFile, 'r') as inf:
            seq = inf.read()
        with open(self.outputFile, 'a') as out:
            out.write(seq)
if __name__ == '__main__':
    # Scratch cfile written by the transmitter and consumed by the receiver.
    ofile = '/tmp/tmp_whitening.cfile'
    # (spreading factor, coding rate) combinations to characterise.
    testset = [ (7, "4/6"), (7, "4/7"), (8, "4/5"), (12, "4/6"), (9, "4/5"), (10, "4/5"), (11, "4/5"), (6, "4/5")]
    for settings in testset:
        # One CSV per SF/CR combination, e.g. ./test_out_SF7_CR4-6.csv
        dataf = './test_out_SF{0:d}_CR{1:s}.csv'.format(settings[0], '-'.join(settings[1].split('/')))
        # Touch the output file so it exists before appending to it.
        out = open(dataf, 'a')
        out.close()
        examplifr = Examplify(settings[0], settings[1], gains = [32, 38, 38])
        whitening = ReceiveWhitening(settings[0], dataf)
        # First 8 samples: payload of 4 frames of 256 zeros each...
        for i in range(8):
            print("Sample {0:d} of 16".format(i))
            examplifr.transmitToFile(['0' * 256] * 4, ofile)
            whitening.captureSequence(ofile)
        # ...second 8 samples: 8 frames, for 16 samples in total.
        for i in range(8):
            print("Sample {0:d} of 16".format(i + 8))
            examplifr.transmitToFile(['0' * 256] * 8, ofile)
            whitening.captureSequence(ofile)
        # Drop references so flowgraph/SDR resources are released between runs.
        examplifr = None
        whitening = None
|
#
# Allows GTK 3 python applications to exit when CTRL-C is raised
# From https://bugzilla.gnome.org/show_bug.cgi?id=622084
#
# Author: Simon Feltman
# License: Presume same as pygobject
#
import sys
import signal
| from typing import ClassVar, List
from gi.repository import GLib
class InterruptibleLoopContext:
    """
    Context Manager for GLib/Gtk based loops.
    Usage of this context manager will install a single GLib unix signal handler
    and allow for multiple context managers to be nested using this single handler.
    """
    #: Global stack context loops. This is added to per InterruptibleLoopContext
    #: instance and allows for context nesting using the same GLib signal handler.
    _loop_contexts: ClassVar[List['InterruptibleLoopContext']] = []
    #: Single source id for the unix signal handler.
    _signal_source_id = None

    @classmethod
    def _glib_sigint_handler(cls, user_data):
        # SIGINT is routed to the innermost (most recently entered) context.
        context = cls._loop_contexts[-1]
        context._quit_by_sigint = True
        context._loop_exit_func()
        # keep the handler around until we explicitly remove it
        return True

    def __init__(self, loop_exit_func):
        # Callable that stops the loop this context wraps (e.g. loop.quit).
        self._loop_exit_func = loop_exit_func
        # Set when the loop exits due to SIGINT so __exit__ can re-raise
        # KeyboardInterrupt like a plain (non-GLib) Python program would.
        self._quit_by_sigint = False

    def __enter__(self):
        # Only use unix_signal_add if this is not win32 and there has
        # not already been one.
        if sys.platform != 'win32' and not InterruptibleLoopContext._loop_contexts:
            # Add a glib signal handler
            source_id = GLib.unix_signal_add(
                GLib.PRIORITY_DEFAULT, signal.SIGINT, self._glib_sigint_handler, None
            )
            InterruptibleLoopContext._signal_source_id = source_id
        InterruptibleLoopContext._loop_contexts.append(self)

    def __exit__(self, exc_type, exc_value, traceback):
        # Contexts must unwind strictly LIFO.
        context = InterruptibleLoopContext._loop_contexts.pop()
        assert self == context
        # if the context stack is empty and we have a GLib signal source,
        # remove the source from GLib and clear out the variable.
        if (
            not InterruptibleLoopContext._loop_contexts
            and InterruptibleLoopContext._signal_source_id is not None
        ):
            GLib.source_remove(InterruptibleLoopContext._signal_source_id)
            InterruptibleLoopContext._signal_source_id = None
        if self._quit_by_sigint:
            # caught by _glib_sigint_handler()
            raise KeyboardInterrupt
|
##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) vers | ion 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IM | PLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
import os
class Texlive(Package):
    """TeX Live is a free software distribution for the TeX typesetting
    system. Heads up, it's is not a reproducible installation."""

    homepage = "http://www.tug.org/texlive"

    # Install from specific site because the texlive mirrors do not
    # all update in synchrony.
    #
    # BEWARE: TexLive updates their installs frequently (probably why
    # they call it *Live*...).  There is no good way to provide a
    # repeatable install of the package.
    #
    # We're now pulling the installation bits from tug.org's repo of
    # historic bits.  This means that the checksum for the installer
    # itself is stable.  Don't let that fool you though, it's still
    # installing TeX **LIVE** from e.g. ctan.math.... below, which is
    # not reproducible.
    version('live', '8f8fc301514c08a89a2e97197369c648',
            url='ftp://tug.org/historic/systems/texlive/2017/install-tl-unx.tar.gz')

    # There does not seem to be a complete list of schemes.
    # Examples include:
    #   full scheme (everything)
    #   medium scheme (small + more packages and languages)
    #   small scheme (basic + xetex, metapost, a few languages)
    #   basic scheme (plain and latex)
    #   minimal scheme (plain only)
    # See:
    # https://www.tug.org/texlive/doc/texlive-en/texlive-en.html#x1-25025r6
    variant(
        'scheme',
        default='small',
        values=('minimal', 'basic', 'small', 'medium', 'full'),
        description='Package subset to install'
    )

    depends_on('perl', type='build')

    def install(self, spec, prefix):
        # Using texlive's mirror system leads to mysterious problems,
        # in lieu of being able to specify a repository as a variant, hardwire
        # a particular (slow, but central) one for now.
        _repository = 'http://ctan.math.washington.edu/tex-archive/systems/texlive/tlnet/'
        # The installer reads its destination prefix from the environment.
        env = os.environ
        env['TEXLIVE_INSTALL_PREFIX'] = prefix
        perl = which('perl')
        scheme = spec.variants['scheme'].value
        # -portable keeps user dotfiles untouched; -profile /dev/null
        # accepts all defaults so the install runs non-interactively.
        perl('./install-tl', '-scheme', scheme,
             '-repository', _repository,
             '-portable', '-profile', '/dev/null')
|
import os
import qprompt

# Prompt repeatedly until the user supplies a path to an existing file.
path = qprompt.ask_str("Enter path to file", vld=lambda x: os.path.isfile(x))
# Prompt repeatedly until the user supplies an integer strictly less than 10.
size = qprompt.ask_int("Enter number less than 10", vld=lambda x: x < 10)
|
self.assertEqual(response.status_code, 302)
def test_hide_from_not_user(self):
    """Logging in as a different user ('jo') yields a redirect (302)."""
    self._create_user_and_login("jo")
    status = self.client.get(self.url).status_code
    self.assertEqual(status, 302)
def test_show_to_anon_if_public(self):
    """Once the form is shared, an anonymous GET returns 200."""
    self.xform.shared = True
    self.xform.save()
    self.assertEqual(self.anon.get(self.url).status_code, 200)
def test_dl_xlsx_xlsform(self):
    """A published xlsx form downloads as an attachment with its filename."""
    self._publish_xlsx_file()
    url = reverse(download_xlsform, kwargs={
        'username': self.user.username,
        'id_string': 'exp_one'
    })
    response = self.client.get(url)
    self.assertEqual(response.status_code, 200)
    self.assertEqual(
        response['Content-Disposition'],
        "attachment; filename=exp_one.xlsx")
def test_dl_xls_to_anon_if_public(self):
    """Anonymous users can download the xlsform of a shared form."""
    self.xform.shared = True
    self.xform.save()
    url = reverse(download_xlsform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    self.assertEqual(self.anon.get(url).status_code, 200)
def test_dl_xls_for_basic_auth(self):
    """HTTP basic auth credentials grant the xlsform download."""
    auth_headers = {
        'HTTP_AUTHORIZATION':
        http_auth_string(self.login_username, self.login_password)
    }
    url = reverse(download_xlsform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    self.assertEqual(self.anon.get(url, **auth_headers).status_code, 200)
def test_dl_json_to_anon_if_public(self):
    """Anonymous users can download the json form of a shared form."""
    self.xform.shared = True
    self.xform.save()
    url = reverse(download_jsonform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    self.assertEqual(self.anon.get(url).status_code, 200)
def test_dl_jsonp_to_anon_if_public(self):
    """JSONP download of a shared form wraps the body in the callback."""
    self.xform.shared = True
    self.xform.save()
    callback = 'jsonpCallback'
    url = reverse(download_jsonform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    response = self.anon.get(url, {'callback': callback})
    self.assertEqual(response.status_code, 200)
    self.assertEqual(response.content.startswith(callback + '('), True)
    self.assertEqual(response.content.endswith(')'), True)
def test_dl_json_for_basic_auth(self):
    """HTTP basic auth credentials grant the json form download."""
    auth_headers = {
        'HTTP_AUTHORIZATION':
        http_auth_string(self.login_username, self.login_password)
    }
    url = reverse(download_jsonform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    self.assertEqual(self.anon.get(url, **auth_headers).status_code, 200)
def test_dl_json_for_cors_options(self):
    """OPTIONS on the json form advertises the expected CORS headers."""
    response = self.anon.options(reverse(download_jsonform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    }))
    expected_headers = ['Accept', 'Origin', 'X-Requested-With',
                        'Authorization']
    received = [h.strip()
                for h in response['Access-Control-Allow-Headers'].split(',')]
    self.assertListEqual(expected_headers, received)
    self.assertEqual(response['Access-Control-Allow-Methods'], 'GET')
    self.assertEqual(response['Access-Control-Allow-Origin'], '*')
def test_dl_xform_to_anon_if_public(self):
    """Anonymous users can download the xform xml of a shared form."""
    self.xform.shared = True
    self.xform.save()
    url = reverse(download_xform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    self.assertEqual(self.anon.get(url).status_code, 200)
def test_dl_xform_for_basic_auth(self):
    """HTTP basic auth credentials grant the xform download."""
    auth_headers = {
        'HTTP_AUTHORIZATION':
        http_auth_string(self.login_username, self.login_password)
    }
    url = reverse(download_xform, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string
    })
    self.assertEqual(self.anon.get(url, **auth_headers).status_code, 200)
def test_dl_xform_for_authenticated_non_owner(self):
    """Any authenticated user may download another user's xform."""
    self._create_user_and_login('alice', 'alice')
    url = reverse(download_xform, kwargs={
        'username': 'bob',
        'id_string': self.xform.id_string
    })
    self.assertEqual(self.client.get(url).status_code, 200)
def test_show_private_if_shared_but_not_data(self):
    """A shared form without shared data still displays 'PRIVATE'."""
    self.xform.shared = True
    self.xform.save()
    self.assertContains(self.anon.get(self.url), 'PRIVATE')
def test_show_link_if_shared_and_data(self):
    """With shared data and a submission, the anon page links to the csv export."""
    self.xform.shared = True
    self.xform.shared_data = True
    self.xform.save()
    self._submit_transport_instance()
    csv_export_url = reverse(export_list, kwargs={
        'username': self.user.username,
        'id_string': self.xform.id_string,
        'export_type': 'csv'
    })
    self.assertContains(self.anon.get(self.url), csv_export_url)
def test_show_link_if_owner(self):
    """Owner's page links to csv/xls exports; the map link appears only
    for a form with geopoints once it has a submission."""
    self._submit_transport_instance()
    response = self.client.get(self.url)
    common = {
        'username': self.user.username,
        'id_string': self.xform.id_string,
    }
    self.assertContains(
        response, reverse(export_list, kwargs=dict(common, export_type='csv')))
    self.assertContains(
        response, reverse(export_list, kwargs=dict(common, export_type='xls')))
    self.assertNotContains(response, reverse(map_view, kwargs=common))
    # check that a form with geopoints has the map url
    response = self._publish_xls_file(
        os.path.join(
            os.path.dirname(__file__), "fixtures", "gps", "gps.xls"))
    self.assertEqual(response.status_code, 200)
    self.xform = XForm.objects.latest('date_created')
    gps_kwargs = {
        'username': self.user.username,
        'id_string': self.xform.id_string,
    }
    show_url = reverse(show, kwargs=gps_kwargs)
    map_url = reverse(map_view, kwargs=gps_kwargs)
    # no map url before any submission exists
    self.assertNotContains(self.client.get(show_url), map_url)
    # make a submission carrying a gps point
    self._make_submission(
        os.path.join(
            os.path.dirname(__file__), "fixtures", "gps", "instances",
            "gps_1980-01-23_20-52-08.xml")
    )
    self.assertEqual(self.response.status_code, 201)
    # the map link now appears on the show page
    self.assertContains(self.client.get(show_url), map_url)
def test_user_sees_edit_btn(self):
    """The owner's page contains the edit link."""
    self.assertContains(self.client.get(self.url), 'edit</a>')
def test_user_sees_settings(self):
    """The owner's page contains the Settings section."""
    self.assertContains(self.client.get(self.url), 'Settings')
def test_anon_no_edit_btn(self):
    """Anonymous visitors of a shared form never see the edit link."""
    self.xform.shared = True
    self.xform.save()
    self.assertNotContains(self.anon.get(self.url), 'edit</a>')
def test_anon_no_toggle_data_share_btn(self):
    """Anonymous visitors never see the PUBLIC/PRIVATE toggle buttons."""
    self.xform.shared = True
    self.xform.save()
    page = self.anon.get(self.url)
    self.assertNotContains(page, 'PUBLIC</a>')
    self.assertNotContains(page, 'PRIVATE</a>')
def test_show_add_sourc_doc_if_owner(self):
    """The owner's page shows the 'Source document:' section."""
    self.assertContains(self.client.get(self.url), 'Source document:')
def test_show_add_supporting_docs_if_owner(self):
    """The owner's page shows the 'Supporting document:' section."""
    self.assertContains(self.client.get(self.url), 'Supporting document:')
def test_show_add_supporting_media_if_owner(self):
|
# Copyright (c) 2017 SUSE LLC
import logging
import re
import subprocess
# Run a command, and return the result in string format, stripped. Return None if command fails.
def _run_cmd(cmd_array):
try:
return subprocess.check_output(cmd_array).decode("utf-8").strip()
except subprocess.CalledProcessError as c:
logging.warning('Command {} return error code [{}]:'.format(c.cmd, c.returncode))
return None
def get_repo():
    """Returns the current git repo; or 'Unknown repo' if there is an error."""
    url = _run_cmd(['git', 'ls-remote', '--get-url', 'origin'])
    if url is None:
        return 'Unknown repo'
    return url
def get_branch():
    """Returns the current git branch; or 'Unknown branch' if there is an error."""
    name = _run_cmd(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    if name is None:
        return 'Unknown branch'
    return name
def get_hash():
    """Returns the current git commit hash; or 'Unknown commit hash' if there is an error."""
    sha = _run_cmd(['git', 'rev-parse', '--verify', 'HEAD'])
    if sha is None:
        return 'Unknown commit hash'
    return sha
def file_is_dirty(file_path):
    """If a file is new, modified, or deleted in git's tracking return True. False otherwise."""
    # BUGFIX: the previous implementation matched an unescaped regex against
    # the full human-readable `git status` output with re.match, which (a)
    # crashed with TypeError when _run_cmd returned None (e.g. not a git
    # repo) and (b) essentially never matched because the anchors applied to
    # the whole multi-line output. Use --porcelain with a pathspec instead:
    # it prints one line per changed/untracked file and nothing when clean,
    # mirroring branch_is_dirty() below.
    file_status_msg = _run_cmd(
        ['git', 'status', '--untracked-files=all', '--porcelain', str(file_path)])
    # None (git failed) and '' (file clean) both mean "not dirty".
    return bool(file_status_msg)
def branch_is_dirty():
    """
    If any files are new, modified, or deleted in git's tracking return True. False otherwise.
    """
    status = _run_cmd(['git', 'status', '--untracked-files=all', '--porcelain'])
    # --porcelain prints nothing at all when the working tree is clean
    return bool(status)
|
v.log_file+"_dist"+".dat")
logger.info("End test.")
logger.info( "Wall time for all %s", time.time()-tt_glob)
return 42
else:
logger.error("Something wrong in test selection.")
else:
logger.info("This is not a test!")
# Dictionary for statistics.
stats = {}
#######################################
# Data reading or creation
######################################
# Retrieve the positions.
p_1 = mod.hdf5_data(v.path, v.file_1, v.n_sel)
# Binary gif files.
#p_1 = mod.gif2(v.file_1)
#p_2 = mod.gif2(v.file_2)
# Random data for test.
#p_1 = mod.random_data(10000, b_size_1, 0)
#p_2 = mod.random_data(10000, b_size_1, 0)
# Usefull spatial info about the first set.
min_1 = np.amin(p_1, 0)
max_1 = np.amax(p_1, 0)
b_size_1 = max_1 - min_1
offset_1 = np.array([min_1[0],0,0])
if slicing == False:
logger.info("slicing is off.")
p_2 = mod.hdf5_data(v.path, v.file_2, v.n_sel)
elif slicing == True:
logger.info("slicing is on.")
p_2 = mod.hdf5_data_slice(v.path, v.file_2, min_1[0],
max_1[0], v.r_max, v.n_sel)
else:
print "problem with slicing choice"
# Usefull spatial info about the second set.
min_2 = np.amin(p_2, 0)
max_2 = np.amax(p_2, 0)
b_size_2 = max_2 - min_2
offset_2 = np.array([min_2[0],0,0])
logger.info("First set limits %s %s", min_1, max_1)
logger.info("Second set limits %s %s", min_2, max_2)
stats['Set 1'] = p_1.shape[0]
stats['Set 2'] = p_2.shape[0]
logger.info("path is %s", v.path)
logger.info("filenames %s, %s", v.file_1, v.file_2)
# Check for data self correlation.
if p_1.size == p_2.size:
self_corr = (p_1==p_2).all()
else:
self_corr = False
logger.info("Data self correlation is %s", self_corr)
logger.info("We are goingo to create random %s * dim_data.", m_factor)
# Generate random.
random_1 = mod.random_data(p_1.shape[0]*m_factor, b_size_1, offset_1) #fof
if self_corr == True:
random_2 = random_1
else:
random_2 = mod.random_data(p_2.shape[0]*m_factor, b_size_2, offset_2) #particles
# Create the result files.
result_file = open(v.log_file+'-result.dat', 'a')
###################
# Binning
###################
# Generate binning.
logger.info("Binning...")
shell, r = mod.binning(v.r_min, v.r_max, v.r_step, v.strategy)
# Save binning.
result_file.write("Shells ")
np.savetxt(result_file, shell[np.newaxis,:])
result_file.write("Radii ")
np.savetxt(result_file, r[np.newaxis,:])
result_file.flush()
########################
# Trees build
########################
# Build trees.
logger.info("Start building the trees...")
d_tree_1, stats['d_tree_1 build time'] = mod.tree_build(p_1, v.leafsize)
# Data trees.
if self_corr == True:
logger.info("Copying the first data tree into the second.")
d_tree_2 = d_tree_1
stats['d_tree_2 build time'] = stats['d_tree_1 build time']
else:
d_tree_2, stats['d_tree_2 build time'] = mod.tree_build(p_2, v.leafsize)
# Random trees.
r_tree_1, stats['r_tree_1 build time'] = mod.tree_build(random_1, v.leafsize)
if self_corr == True:
logger.info("Copying the first random tree into the second.")
r_tree_2 = r_tree_1
stats['r_tree_2 build time'] = stats['r_tree_1 build time']
else:
r_tree_2, stats['r_tree_2 build time'] = mod.tree_build(random_2, v.leafsize)
del p_1, p_2, random_1, random_2 # da controllare, il random non andra` cancellato poi
logger.info("Copying proc status file at mid.")
status_src = '/proc/' | +str(os.getpid())+'/status'
status_dest = '../logs/status'+str(os.getpid())+'@mid'
shcopy(status_src, status_dest)
stats['Time elapsed before traversing'] = time.time()-tt_glob
logger.info("Time elapsed before traversing %s", stats['Time elapsed before traversing'])
################################
# Counting
################################
# Data counting.
trav_stats = {}
stats_array = np.zeros((4, 10))
logge | r.info("Starting data-data counts...")
particles_counts, trav_stats['data'], stats_array[0,:]= mod.count(d_tree_1, d_tree_2, shell, self_corr, v.strategy)
logger.info("Particles counts:")
logger.info("DD %s", particles_counts)
result_file.write("Particles_counts ")
np.savetxt(result_file, particles_counts[np.newaxis,:])
result_file.flush()
# Random counting.
logger.info("Starting random-random counts...")
random_counts, trav_stats['random'], stats_array[1,:] = mod.count(r_tree_1, r_tree_2, shell, self_corr, v.strategy)
logger.info("Random counts:")
logger.info("RR %s", random_counts)
result_file.write("Random_counts ")
np.savetxt(result_file, random_counts[np.newaxis,:])
result_file.flush()
# Mixed counting 1 (needed for the LS estimator).
logger.info("Starting mixed_1 counts...")
mixed_counts_1, trav_stats['mixed 1'], stats_array[2,:] = mod.count(d_tree_1, r_tree_2, shell, False, v.strategy)
logger.info("Mixed_count_1:")
logger.info("M1 %s", mixed_counts_1)
result_file.write("Mixed_counts_1 ")
np.savetxt(result_file, mixed_counts_1[np.newaxis,:])
result_file.flush()
# Mixed counting 2 (needed for the LS estimator).
logger.info("Starting mixed_2 counts...")
mixed_counts_2, trav_stats['mixed 2'], stats_array[3,:] = mod.count(r_tree_1, d_tree_2, shell, False, v.strategy)
logger.info("Mixed_count_2:")
logger.info("M2 %s", mixed_counts_2)
result_file.write("Mixed_counts_2 ")
np.savetxt(result_file, mixed_counts_2[np.newaxis,:])
result_file.flush()
# Correlate.
#TPCF, TPCF_err, stats['Correlation time'] = mod.correlate(particles_counts, mixed_counts_1, mixed_counts_2, random_counts)
# Print statistics.
logger.info("particles_counts %s", particles_counts)
logger.info("random_counts %s", random_counts)
logger.info("mixed_counts_1 %s", mixed_counts_1)
logger.info("mixed_counts_2 %s", mixed_counts_2)
#logger.info( "TPCF %s", TPCF)
#logger.info("r %s", r)
#result_file.write("TPCF ")
#np.savetxt(result_file, TPCF[np.newaxis,:])
result_file.flush()
result_file.close()
# Plot results.
#mod.plot_correlation(r, TPCF, TPCF_err)
######################
# Info
######################
# Print some other statistics.
logger.info("########## Stats ##########")
logger.info("Strategy %s", v.strategy)
logger.info("Set 1 dimensions %s", stats['Set 1'])
logger.info("Set 2 dimensions %s", stats['Set 2'])
logger.info("Set 1 mean density %s", (stats['Set 1']/
(b_size_1[0]*b_size_1[1]*b_size_1[2])))
logger.info("Set 2 mean density %s", (stats['Set 2']/
(b_size_2[0]*b_size_2[1]*b_size_2[2])))
logger.info("Leafsize %s", v.leafsize)
logger.info('d_tree_1 build time %s', stats['d_tree_1 build time'])
logger.info('d_tree_2 build time %s', stats['d_tree_2 build time'])
logger.info('r_tree_1 build time %s', stats['r_tree_1 build time'])
logger.info('r_tree_2 build time %s', stats['r_tree_2 build time'])
logger.info('Time elapsed before traversing %s', stats['Time elapsed before traversing'])
logger.info("Data traverse:")
logger.info('Time for traverse %s', trav_stats['data']['Time for traverse'])
logger.info('Number of traverse %s', trav_stats['data']['Number of traverse'])
logger.info('Expected traverse %s', trav_stats['data']['Expected traverse'])
logger.info('Traverse speed %s', trav_stats['data']['Traverse speed'])
logger.info('n_opened_leaves %s', trav_stats['data']['n_opened_leaves'])
logger.info('sort_times %s', trav_stats['data']['sort_times'])
logger.info('dist_times %s', trav_stats['data']['dist_times'])
logger.info( "Wall time for all %s", time.time()-tt_glob)
logger. |
#!/bin/py | thon
import suds
from suds.client import Client
# Endpoint URL with HTTP basic-auth credentials embedded (user:pass@host);
# points at an Oracle "orawsv" native web-service WSDL for PAY_RAISE.
u = 'http://owsx:owsx_user@localhost:8080/orawsv/OWSX/OWSX_UTL/PAY_RAISE'
# Browser-like User-Agent header (defined here but not passed to Client).
h = {'User-Agent':'Mozilla/4.0'}
# Fetch and parse the WSDL; printing the client lists discovered methods/types.
client = Client(u)
print(client)
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-08-06 14:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.del | etion
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11): converts statistics.player into a
    # OneToOneField on players.Player with reverse accessor "statistic".
    dependencies = [
        ('players', '0006_statistics'),
    ]
    operations = [
        migrations.AlterField(
            model_name='statistics',
            name='player',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='statistic', to='players.Player'),
        ),
    ]
|
import curses
from pygments import token
class ColorProfile:
    """Maps Pygments token types to curses color-pair numbers."""

    def __init__(self):
        # Token type -> curses color pair index (pair numbers are set up
        # elsewhere via curses.init_pair; 0 is the terminal default).
        self._map = {
            token.Text: 4,
            token.Keyword: 3,
            token.Comment: 2,
            token.Number: 5,
            token.Name: 6,
            token.Error: 7,
            token.Punctuation: 8,
            token.Whitespace: 0,
        }

    def color_for(self, attr):
        """Return the curses color pair for the Pygments token type *attr*.

        Unmapped token types fall back to pair 1.  Uses a single dict
        lookup instead of the previous `in`-check followed by indexing.
        """
        return self._map.get(attr, 1)
"""
Definition of models.
"""
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
# python manage.py makemigrations app
progress = models.IntegerField(default=0)
progress_v1 = models.Integer | Field(default=0)
progress_v2 = models.IntegerField(default=0)
progress_v3 = models.IntegerField(default=0)
progress_g1 = models.IntegerField(default=0)
progress_g2 = models.IntegerField(default=0)
progress_g3 = models.IntegerField(default=0)
progress_v1.contribute_to_class(User, ' | progress_v1')
progress_v2.contribute_to_class(User, 'progress_v2')
progress_v3.contribute_to_class(User, 'progress_v3')
progress_g1.contribute_to_class(User, 'progress_g1')
progress_g2.contribute_to_class(User, 'progress_g2')
progress_g3.contribute_to_class(User, 'progress_g3')
progress.contribute_to_class(User, 'progress')
class exersize(models.Model):
    # An exercise question with a category and difficulty level.
    # (Misspelled name kept as-is: renaming the model would require a
    # migration and break existing references.)
    category = models.CharField(max_length = 50, default=0)
    question = models.CharField(max_length = 150, default=0)
    level = models.CharField(max_length = 2, default=0)
class exersizeAnswerJoin(models.Model):
    # Joins an answer string to its exercise; rows are deleted with the
    # parent exercise (CASCADE).
    answer = models.CharField(max_length = 30, default=0)
    ex_id = models.ForeignKey(exersize, on_delete = models.CASCADE)
import pandas as pd
import numpy as np
import warnings
from clint.textui import colored
warnings.simplefilter("ignore")
class Sweetcat:
    """Loader for the SWEET-Cat stellar-parameters database."""

    def __init__(self):
        # self.fname_sc = 'WEBSITE_online_EU-NASA_full_database.rdb'
        self.fname_sc = 'WEBSITE_online_EU-NASA_full_database_clean.rdb'
        # Read the database into memory immediately.
        self.readSC()

    def readSC(self):
        # TODO: Use the ra and dec, and match with coordinates instead of name
        # stored in self.coordinates.
        # Column names for the current version of SWEET-Cat.
        column_names = ['name', 'hd', 'ra', 'dec', 'V', 'Verr', 'p', 'perr',
                        'pflag', 'Teff', 'Tefferr', 'logg', 'logger',
                        'n1', 'n2', 'vt', 'vterr', 'feh', 'feherr', 'M', 'Merr',
                        'author', 'link', 'source', 'update', 'comment',
                        'database', 'n3']
        SC = pd.read_csv(self.fname_sc, delimiter='\t', names=column_names)
        # Normalised star names: lower-cased, spaces/dashes removed, stripped.
        self.sc_names = [
            name.lower().replace(' ', '').replace('-', '').strip()
            for name in SC.name
        ]
        # Original star names (whitespace trimmed only).
        self.sc_names_orig = [name.strip() for name in SC.name]
        # Coordinates of the stars in SWEET-Cat.
        self.coordinates = SC.loc[:, ['ra', 'dec']]
        # Full SWEET-Cat table (used to automatically update the database label).
        self.SC = SC
if __name__ == '__main__':
    # Loading SWEET Cat
    sc = Sweetcat()
    # Check for duplicates, subset of columns can be changed
    print('\nChecking for possible duplicates ...')
    print(colored.green('Same RA/DEC'))
    print(sc.SC[sc.SC.duplicated(['ra', 'dec'], keep=False)][['name',
                                                              'hd',
                                                              'ra',
                                                              'dec']])
    print(colored.green('\nSame HD number'))
    print(sc.SC[sc.SC.duplicated(['hd'],
                                 keep=False)].dropna(subset=['hd'])[['name',
                                                                     'hd',
                                                                     'ra',
                                                                     'dec']])
    print(colored.green('\nApproximate RA/DEC ...'))
    # Remove the characters after the . in the coordinates
    ra_sc = sc.SC['ra'].values.tolist()
    ra_approx = list(map(lambda i: i[:i.find('.')], ra_sc))
    dec_sc = sc.SC['dec'].values.tolist()
    dec_approx = list(map(lambda i: i[:i.find('.')], dec_sc))
    # Check for similar RA/DEC: group rows whose truncated coordinates match.
    idx_duplicate = []
    for idx, (ra, dec) in enumerate(zip(ra_approx, dec_approx)):
        dupli = list(np.where((np.array(ra_approx) == ra) &
                              (np.array(dec_approx) == dec))[0])
        if len(dupli) > 1:
            idx_duplicate.append(dupli)
    # Print possible duplicates
    print(colored.green('RA/DEC are similar: possible duplicates\n'))
    unique_duplicate = set([tuple(t) for t in idx_duplicate])
    for idx in unique_duplicate:
        print(sc.SC.iloc[list(idx)][['name', 'hd', 'ra', 'dec']])
    # Remove the -1.0 in microturbulence and its error
    # NOTE(review): this expression only selects rows; nothing is removed
    # or assigned here — presumably left as an interactive aid.
    sc.SC[sc.SC['vt'] < 0.0][['name', 'hd', 'ra', 'dec',
                              'vt', 'vterr', 'author', 'link']]
    # Change the value of a given cell
    # sc.SC.at[9, 'vt'] = 1.44
    # sc.SC.at[9, 'vterr'] = np.nan
    # Uncomment some of the following lines to remove duplicates
    # Indexes of the duplicates
    # indexes = sc.SC[sc.SC.duplicated(['ra', 'dec'], keep=False)].index
    # Remove a row (HD21749)
    # new_sc = sc.SC.drop([2728])
    # # Write the new file
    # # Convert Tefferr column to integers
    # new_sc['Tefferr'] = new_sc['Tefferr'].fillna('-111111')
    # new_sc['Tefferr'] = new_sc['Tefferr'].astype(int).replace(-111111, 'NULL')
    # # Replace NaN by NULL
    # new_sc.fillna(value='NULL', inplace=True)
    # new_sc.to_csv('WEBSITE_online_EU-NASA_full_database_clean_09-03-2020.rdb',
    #               sep='\t', index=False, header=False)
    # # Select only the EU data
    # sc_EU = new_sc[new_sc['database'].str.contains('EU')]
    # # Drop the database column
    # sc_like_old = sc_EU.drop(columns=['database'])
    # sc_like_old.to_csv('WEBSITE_online_EU-updated_09-03-2020.rdb',
    #                    sep='\t', index=False, header=False)
|
from .logic import LogicAdapter
class NoKnowledgeAdapter(LogicAdapter):
    """
    A system adapter that is automatically added to the list of logic
    adapters during initialization.  It is placed at the beginning of
    the list so that it receives the highest priority.
    """

    def process(self, statement):
        """
        Return a (confidence, statement) pair.

        When the storage backend holds no known responses, return the
        input statement with confidence 1; otherwise confidence 0.
        """
        has_known_statements = bool(self.context.storage.count())
        if has_known_statements:
            return 0, statement
        return 1, statement
|
# Copyright 2008-2010 by Peter Cock. All rights reserved.
#
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Bio.AlignIO support for the "nexus" file format.
You are expected to use this module via the Bio.AlignIO functions (or the
Bio.SeqIO functions if you want to work directly with the gapped sequences).
See also the Bio.Nexus module (which this code calls internally),
as this offers more than just accessing the alignment or its
sequences as SeqRecord objects.
"""
from __future__ import print_function
import sys
# Add path to Bio
sys.path.append('../..')
from Bio.SeqRecord import SeqRecord
from Bio.Nexus import Nexus
from Bio.Align import MultipleSeqAlignment
from Bio.AlignIO.Interfaces import AlignmentWriter
from Bio import Alphabet
__docformat__ = "restructuredtext en"
# You can get a couple of example files here:
# http://www.molecularevolution.org/resources/fileformats/
# This is a generator function!
def NexusIterator(handle, seq_count=None):
    """Returns SeqRecord objects from a Nexus file.

    Thus uses the Bio.Nexus module to do the hard work.

    You are expected to call this function via Bio.SeqIO or Bio.AlignIO
    (and not use it directly).

    NOTE - We only expect ONE alignment matrix per Nexus file,
    meaning this iterator will only yield one MultipleSeqAlignment.
    """
    n = Nexus.Nexus(handle)
    if not n.matrix:
        # No alignment found.  A plain return ends the generator cleanly;
        # raising StopIteration inside a generator became a RuntimeError
        # under PEP 479 (Python 3.7+).
        return
    # Bio.Nexus deals with duplicated names by adding a '.copy' suffix.
    # The original names and the modified names are kept in these two lists:
    assert len(n.unaltered_taxlabels) == len(n.taxlabels)
    if seq_count and seq_count != len(n.unaltered_taxlabels):
        raise ValueError("Found %i sequences, but seq_count=%i"
                         % (len(n.unaltered_taxlabels), seq_count))
    # TODO - Can we extract any annotation too?
    records = (SeqRecord(n.matrix[new_name], id=new_name,
                         name=old_name, description="")
               for old_name, new_name
               in zip(n.unaltered_taxlabels, n.taxlabels))
    # All done
    yield MultipleSeqAlignment(records, n.alphabet)
class NexusWriter(AlignmentWriter):
    """Nexus alignment writer.

    Note that Nexus files are only expected to hold ONE alignment
    matrix.

    You are expected to call this class via the Bio.AlignIO.write() or
    Bio.SeqIO.write() functions.
    """
    def write_file(self, alignments):
        """Use this to write an entire file containing the given alignments.

        Arguments:
         - alignments - A list or iterator returning MultipleSeqAlignment
           objects.  This should hold ONE and only one alignment.

        Returns the number of alignments written (0 or 1).
        """
        align_iter = iter(alignments) # Could have been a list
        try:
            first_alignment = next(align_iter)
        except StopIteration:
            first_alignment = None
        if first_alignment is None:
            # Nothing to write!
            return 0
        # Check there is only one alignment...
        try:
            second_alignment = next(align_iter)
        except StopIteration:
            second_alignment = None
        if second_alignment is not None:
            raise ValueError("We can only write one Alignment to a Nexus file.")
        # Good. Actually write the single alignment,
        self.write_alignment(first_alignment)
        return 1 # we only support writing one alignment!
    def write_alignment(self, alignment):
        # Creates an empty Nexus object, adds the sequences,
        # and then gets Nexus to prepare the output.
        if len(alignment) == 0:
            raise ValueError("Must have at least one sequence")
        columns = alignment.get_alignment_length()
        if columns == 0:
            raise ValueError("Non-empty sequences are required")
        # Minimal NEXUS skeleton; Bio.Nexus fills in the real dimensions
        # when sequences are added below.
        minimal_record = "#NEXUS\nbegin data; dimensions ntax=0 nchar=0; " \
                         + "format datatype=%s; end;" \
                         % self._classify_alphabet_for_nexus(alignment._alphabet)
        n = Nexus.Nexus(minimal_record)
        n.alphabet = alignment._alphabet
        for record in alignment:
            n.add_sequence(record.id, str(record.seq))
        # For smaller alignments, don't bother to interleave.
        # For larger alginments, interleave to avoid very long lines
        # in the output - something MrBayes can't handle.
        # TODO - Default to always interleaving?
        n.write_nexus_data(self.handle, interleave=(columns > 1000))
    def _classify_alphabet_for_nexus(self, alphabet):
        """Returns 'protein', 'dna', 'rna' based on the alphabet (PRIVATE).

        Raises an exception if this is not possible."""
        # Get the base alphabet (underneath any Gapped or StopCodon encoding)
        a = Alphabet._get_base_alphabet(alphabet)
        """condition loop below was edited by Ambuj Kumar in order to make
        it align with ConCat"""
        # NOTE(review): string-based type checks below replace the usual
        # isinstance checks; kept as-is per the edit note above.
        if 'Alphabet.Alphabet' not in str(type(a)) and 'Alphabet.ProteinAlphabet' not in str(type(a)) and 'Alphabet.DNAAlphabet' not in str(type(a)) and 'Alphabet.RNAAlphabet' not in str(type(a)) and 'Alphabet.Gapped' not in str(type(a)):
            raise TypeError("Invalid alphabet")
        elif 'Protein' in str(type(a)):
            return "protein"
        elif 'DNA' in str(type(a)):
            return "dna"
        elif 'RNA' in str(type(a)):
            return "rna"
        else:
            # Must be something like NucleotideAlphabet or
            # just the generic Alphabet (default for fasta files)
            raise ValueError("Need a DNA, RNA or Protein alphabet")
if __name__ == "__main__":
    from Bio._py3k import StringIO
    print("Quick self test")
    print("")
    print("Repeated names without a TAXA block")
    # Taxon CYS1_DICDI appears twice: Bio.Nexus renames the second copy.
    handle = StringIO("""#NEXUS
[TITLE: NoName]
begin data;
dimensions ntax=4 nchar=50;
format interleave datatype=protein gap=- symbols="FSTNKEYVQMCLAWPHDRIG";
matrix
CYS1_DICDI -----MKVIL LFVLAVFTVF VSS------- --------RG IPPEEQ----
ALEU_HORVU MAHARVLLLA LAVLATAAVA VASSSSFADS NPIRPVTDRA ASTLESAVLG
CATH_HUMAN ------MWAT LPLLCAGAWL LGV------- -PVCGAAELS VNSLEK----
CYS1_DICDI -----MKVIL LFVLAVFTVF VSS------- --------RG IPPEEQ---X
;
end;
""")
    for a in NexusIterator(handle):
        print(a)
        for r in a:
            print("%r %s %s" % (r.seq, r.name, r.id))
    print("Done")
    print("")
    print("Repeated names with a TAXA block")
    handle = StringIO("""#NEXUS
[TITLE: NoName]
begin taxa
CYS1_DICDI
ALEU_HORVU
CATH_HUMAN
CYS1_DICDI;
end;
begin data;
dimensions ntax=4 nchar=50;
format interleave datatype=protein gap=- symbols="FSTNKEYVQMCLAWPHDRIG";
matrix
CYS1_DICDI -----MKVIL LFVLAVFTVF VSS------- --------RG IPPEEQ----
ALEU_HORVU MAHARVLLLA LAVLATAAVA VASSSSFADS NPIRPVTDRA ASTLESAVLG
CATH_HUMAN ------MWAT LPLLCAGAWL LGV------- -PVCGAAELS VNSLEK----
CYS1_DICDI -----MKVIL LFVLAVFTVF VSS------- --------RG IPPEEQ---X
;
end;
""")
    for a in NexusIterator(handle):
        print(a)
        for r in a:
            print("%r %s %s" % (r.seq, r.name, r.id))
    print("Done")
    print("")
    print("Reading an empty file")
    assert 0 == len(list(NexusIterator(StringIO())))
    print("Done")
    print("")
    print("Writing...")
    handle = StringIO()
    # 'a' still holds the last alignment parsed above.
    NexusWriter(handle).write_file([a])
    handle.seek(0)
    print(handle.read())
    handle = StringIO()
    try:
        # Writer must reject more than one alignment per file.
        NexusWriter(handle).write_file([a, a])
        assert False, "Should have rejected more than one alignment!"
    except ValueError:
        pass
|
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.db import connections as django_connections, DEFAULT_DB_ALIAS, router
from corehq.sql_db.exceptions import PartitionValidationError
@checks.register('settings')
def custom_db_checks(app_configs, **kwargs):
    """Validate that custom database alias settings point at real databases.

    Each listed setting must be either the default alias or an alias
    defined in settings.DATABASES.
    """
    errors = []
    custom_db_settings = [
        'SYNCLOGS_SQL_DB_ALIAS'
    ]
    for setting in custom_db_settings:
        default = getattr(settings, setting) == DEFAULT_DB_ALIAS
        custom = not default and getattr(settings, setting) in settings.DATABASES
        if not (default or custom):
            errors.append(
                # Trailing space added before the implicit concatenation:
                # previously rendered "...default databaseor a valid...".
                checks.Error('settings.{} should either be "default" for a default database '
                             'or a valid database defined in settings.DATABASES'.format(setting))
            )
    return errors
@checks.register('settings')
def check_plproxy_config(app_configs, **kwargs):
    """Warn on unknown PLPROXY keys and validate the standby proxy config."""
    allowed_keys = {'PROXY_FOR_STANDBYS', 'PROXY', 'SHARDS', 'PLPROXY_HOST'}
    messages = []
    for db, config in settings.DATABASES.items():
        if 'PLPROXY' in config:
            # Flag any keys outside the supported PLPROXY vocabulary.
            unknown_keys = set(config['PLPROXY']) - allowed_keys
            if unknown_keys:
                messages.append(checks.Warning(
                    f'Unrecognised PLPROXY settings: {unknown_keys}'
                ))
    try:
        # Imported lazily: building the config can itself raise the
        # validation error we want to surface as a check failure.
        from corehq.sql_db.config import plproxy_config, _get_standby_plproxy_config
        if plproxy_config:
            _get_standby_plproxy_config(plproxy_config)
    except PartitionValidationError as e:
        messages.append(checks.Error(f'Error in PLPROXY standby configuration: {e}'))
    return messages
@checks.register('settings')
def check_standby_configs(app_configs, **kwargs):
    """Ensure each READ load-balancing config spans only one master database.

    Every database in a READ list must be either a master or a standby
    whose master matches all the others in that list.
    """
    # Map each configured standby alias to its master alias.
    standby_to_master = {
        db: config.get('STANDBY', {}).get('MASTER')
        for db, config in settings.DATABASES.items()
        if config.get('STANDBY', {}).get('MASTER')
    }
    # Aliases with no standby markers are treated as masters.
    all_masters = {
        db for db, config in settings.DATABASES.items()
        if 'STANDBY' not in config and 'HQ_ACCEPTABLE_STANDBY_DELAY' not in config
    }
    errors = []
    custom_db_settings = [
        'REPORTING_DATABASES',
        'LOAD_BALANCED_APPS'
    ]
    for setting_name in custom_db_settings:
        setting = getattr(settings, setting_name)
        if not setting:
            continue
        for key, config in setting.items():
            if 'READ' in config:
                read_dbs = {db for db, weight in config['READ']}
                masters = read_dbs & all_masters
                standby_masters = {
                    standby_to_master[db]
                    for db in read_dbs
                    if db in standby_to_master
                }
                if len(masters | standby_masters) > 1:
                    errors.append(checks.Error(
                        # Message fixed: literals previously concatenated to
                        # "...databasemust be refer to...".
                        '"settings.{}.{}" refers to multiple master databases. All READ databases '
                        'must refer to the same master database.'.format(setting_name, key)
                    ))
    return errors
@checks.register(checks.Tags.database, deploy=True)
def check_standby_databases(app_configs, **kwargs):
    """Error when a database marked as a standby is not in recovery mode."""
    from corehq.sql_db.util import get_standby_databases
    # Aliases whose settings declare them to be standbys.
    declared_standbys = {
        alias
        for alias, cfg in settings.DATABASES.items()
        if 'STANDBY' in cfg or 'HQ_ACCEPTABLE_STANDBY_DELAY' in cfg
    }
    # Anything declared but not confirmed by the server is misconfigured.
    not_in_recovery = declared_standbys - get_standby_databases()
    if not not_in_recovery:
        return []
    return [
        checks.Error("Some databases configured as STANDBY are not in recovery mode: {}".format(
            ', '.join(not_in_recovery)
        ))
    ]
@checks.register(checks.Tags.database, deploy=True)
def check_db_tables(app_configs, **kwargs):
    """Verify every managed model's table actually exists in its database.

    Partitioned models are probed on every shard database; other models
    on the database their router assigns.
    """
    from corehq.sql_db.models import PartitionedModel
    from corehq.sql_db.util import get_db_aliases_for_partitioned_query
    errors = []
    # some apps only apply to specific envs
    env_specific_apps = {
        'icds_reports': settings.ICDS_ENVS,
        'aaa': ('none',),
    }
    ignored_models = [
        'DeprecatedXFormAttachmentSQL'
    ]
    def _check_model(model_class, using=None):
        # Probe by casting the table name to regclass; Postgres raises if
        # the relation does not exist.  Failures accumulate in `errors`.
        db = using or router.db_for_read(model_class)
        try:
            with django_connections[db].cursor() as cursor:
                cursor.execute("SELECT %s::regclass", [model_class._meta.db_table])
        except Exception as e:
            errors.append(checks.Error('checks.Error querying model on database "{}": "{}.{}": {}.{}({})'.format(
                using or DEFAULT_DB_ALIAS,
                model_class._meta.app_label, model_class.__name__,
                e.__class__.__module__, e.__class__.__name__,
                e
            )))
    for model in apps.get_models():
        app_label = model._meta.app_label
        enabled_envs = env_specific_apps.get(app_label)
        if enabled_envs and settings.SERVER_ENVIRONMENT not in enabled_envs:
            continue
        if model.__name__ in ignored_models or not model._meta.managed:
            continue
        if issubclass(model, PartitionedModel):
            for db in get_db_aliases_for_partitioned_query():
                _check_model(model, using=db)
        else:
            _check_model(model)
    return errors
|
# snapy - a python snmp library
#
# Copyright (C) 2009 ITA Software, Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 2 as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
import time
from twisted.trial import unittest
from snapy.netsnmp.unittests import TestCase
from snapy.netsnmp import Session, SnmpError, SnmpTimeout, OID
class Result(object):
    """Container for async results"""
    # Filled in by the set_result callback once a reply (or timeout) arrives.
    value = None
def set_result(value, result):
    """Async callback: store *value* on the given Result container."""
    setattr(result, "value", value)
class TestSessionV1(TestCase):
    """Exercises Session get/walk against the local test agent over SNMPv1."""
    version = "1"
    bulk = False
    # OID -> expected value fixtures served by the test agent.
    basics = [
        (OID(".1.3.6.1.4.2.1.1"), 1),
        (OID(".1.3.6.1.4.2.1.2"), -1),
        (OID(".1.3.6.1.4.2.1.3"), 1),
        (OID(".1.3.6.1.4.2.1.4"), "test value"),
    ]
    def setUpSession(self, address):
        self.session = Session(
            version=self.version,
            community="public",
            peername=address,
            _use_bulk=self.bulk)
        self.session.open()
    def tearDownSession(self):
        self.session.close()
    def test_sget(self):
        # Synchronous get of all fixture OIDs.
        result = self.session.sget([x for x,v in self.basics])
        self.assertEquals(result, self.basics)
        return self.finishGet()
    def test_get_small(self):
        # Async get with callback delivery into a Result container.
        result = Result()
        self.session.get([x for x,v in self.basics], set_result, result)
        self.session.wait()
        self.assertEquals(result.value, self.basics)
        return self.finishGet()
    def test_get_big(self):
        # Large request: 99 OIDs in one call.
        oids = []
        for i in xrange(1, 100):
            oids.append(OID((1,3,6,1,4,2,4,i)))
        result = Result()
        self.session.get(oids, set_result, result)
        self.session.wait()
        result = dict(result.value)
        for oid in oids:
            assert oid in result
            assert result[oid] == "data data data data"
        return self.finishGet()
    def test_walk_tree(self):
        # Walking the subtree root returns all fixtures beneath it.
        result = Result()
        self.session.walk([".1.3.6.1.4.2.1"], set_result, result)
        self.session.wait()
        self.assertEquals(result.value, self.basics)
        return self.finishWalk()
    def test_walk_leaf(self):
        # Walking a leaf OID returns just that value.
        oid = OID(".1.3.6.1.4.2.1.1")
        result = Result()
        self.session.walk([oid], set_result, result)
        self.session.wait()
        self.assertEquals(result.value, [(oid, 1)])
        return self.finishGet()
    def test_walk_strict(self):
        # strict=True: a leaf OID yields nothing when walked.
        oid = OID(".1.3.6.1.4.2.1.1")
        result = Result()
        self.session.walk([oid], set_result, result, strict=True)
        self.session.wait()
        self.assertEquals(result.value, [])
        return self.finishStrictWalk()
    def test_sysDescr(self):
        result = self.session.sget([OID("SNMPv2-MIB::sysDescr.0")])
        self.assert_(result)
        self.assertIsInstance(result[0][1], str)
        self.assert_(len(result[0][1]) > 0)
        return self.finishGet()
class TestSessionV2c(TestSessionV1):
    """Runs the v1 suite over SNMPv2c, plus v2c-only checks."""
    version = "2c"
    def test_hrSystemDate(self):
        # This is a special string that gets formatted using the
        # MIB's DISPLAY-HINT value. Also, strip off everything
        # other than the date and hour to avoid a race condition.
        # And one more quirk, these dates are not zero padded
        # so we must format the date manually, whee...
        now = time.localtime()
        now = "%d-%d-%d,%d" % (now[0], now[1], now[2], now[3])
        result = self.session.sget([OID(".1.3.6.1.2.1.25.1.2.0")])
        self.assert_(result)
        value = result[0][1].split(':', 1)[0]
        self.assertEquals(value, now)
        return self.finishGet()
class TestSessionV2cBulk(TestSessionV2c):
    # Re-run the v2c suite with GETBULK requests enabled.
    bulk = True
class TestTimeoutsV1(unittest.TestCase):
    """Targets the discard port (udp/9) to exercise timeout handling."""
    version = "1"
    def setUp(self):
        # retries=0 and a short timeout keep the failing tests fast.
        self.session = Session(
            version=self.version,
            community="public",
            peername="udp:127.0.0.1:9",
            retries=0, timeout=0.1)
        self.session.open()
    def test_sget(self):
        # The synchronous API surfaces the timeout as SnmpError.
        self.assertRaises(SnmpError, self.session.sget, [".1.3.6.1.4.2.1.1"])
    def test_get(self):
        # The async API delivers an SnmpTimeout instance to the callback.
        result = Result()
        self.session.get([".1.3.6.1.4.2.1.1"], set_result, result)
        self.session.wait()
        assert isinstance(result.value, SnmpTimeout)
    def tearDown(self):
        self.session.close()
class TestTimeoutsV2c(TestTimeoutsV1):
    # Same timeout suite over SNMPv2c.
    version = "2c"
class TestOID(unittest.TestCase):
    def test_oid_name(self):
        # Numeric, MIB-qualified, and bare-symbol forms must compare equal.
        oid = OID("1.3.6.1.2.1.1.1.0")
        self.assertEquals(oid, OID("SNMPv2-MIB::sysDescr.0"))
        self.assertEquals(oid, OID("sysDescr.0"))
|
f | rom __future__ import unicode_literals
import sys
sys.path = sys.path[1:]
from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, render_to_response
from django.core.mail import send_mail
from django.conf import settings
import os
def blog(request):
    """Render the blog page with the 'thanks' flag set."""
    context = {'thanks': True}
    return render(request, "blog/blog.html", context)
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django manage.py entry point for the "Raspberry" project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Raspberry.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
|
from . import account_journal
from . impor | t barcode_rule
from . import pos_config
from . import pos_order
from . import product_category
from . import pr | oduct_template
|
import numpy as np
from moha.system.basis import SlaterDeterminant
class state(object):
    """Base class for quantum states.

    (Lower-case class name kept as-is; renaming would break importers.)
    """
    def __init__(self):
        pass
class WaveFunction(state):
    """Base class for wavefunctions; concrete results live in subclasses."""
    def __init__(self):
        pass
class HFWaveFunction(WaveFunction):
    """Container for the results of a Hartree-Fock calculation."""

    def __init__(self, dim, occ, coefficient, density, fock, eorbitals, Eelec, Etot):
        # All values are stored exactly as supplied by the SCF driver;
        # their shapes/types are defined by the caller.
        self.dim = dim
        self.occ = occ
        self.coefficient = coefficient
        self.density = density
        self.fock = fock
        self.eorbitals = eorbitals
        self.Eelec = Eelec
        self.Etot = Etot

    @property
    def configuration(self):
        """Occupation pattern per spin: occ[spin] ones padded with zeros
        up to `dim` entries."""
        return {
            spin: [1] * self.occ[spin] + [0] * (self.dim - self.occ[spin])
            for spin in self.occ
        }
|
from setuptools import setup
'''
The packages subprocess and tkinter is also required from the standard library
''' |
setup(
    name='PLOD',
    version='1.0',
    description='Matplotlib plot designer',
    author='David Kleiven',
    # Fixed keyword: setuptools expects "license"; the previous misspelled
    # "licence" argument was not recognized, so the metadata was dropped.
    license='MIT',
    author_email='davidkleiven446@gmail.com',
    install_requires=['numpy', 'matplotlib'],
    url='https://github.com/davidkleiven/PLOD',
    classifiers=[
        'Programming Language :: Python :: 3',
    ],
    #py_modules=['plotHandler', 'controlGUI'],
    packages=['PLOD']
)
|
import deck
class Hand:
    """A blackjack hand: holds cards and computes the best legal score.

    Cards are expected to expose a ``value`` attribute: an int for pip
    cards, or a string ("K", "Q", "J", "A") for face cards and aces.
    """

    def __init__(self):
        # Cards currently in the hand, in the order they were added.
        self.cards = []

    def add(self, card):
        """Add a single card to the hand."""
        self.cards.append(card)

    def size(self):
        """Return the number of cards held."""
        return len(self.cards)

    def score(self):
        """Return the point total, counting each ace as 11 when that
        does not bust the hand, otherwise as 1."""
        # separate cards into aces and others
        regular_cards = [c for c in self.cards if c.value != "A"]
        aces = [c for c in self.cards if c.value == "A"]
        # tally up regular card values
        points = 0
        for c in regular_cards:
            # Fixed: `basestring` is Python-2-only and raises NameError on
            # Python 3; `str` covers the face-card values used here.
            if isinstance(c.value, str):
                points += 10
            else:
                points += c.value
        # now add in aces
        for c in aces:
            if points + 11 <= 21:
                points += 11
            else:
                points += 1
        return points

    def __repr__(self):
        hand_string = ""
        for card in self.cards:
            hand_string += str(card)
            hand_string += " "
        return hand_string
|
ogle.oauth2 import service_account # type: ignore
from google.cloud.dialogflowcx_v3.types import flow
from google.cloud.dialogflowcx_v3.types import flow as gcdc_flow
from google.longrunning import operations_pb2 # type: ignore
from google.protobuf import empty_pb2 # type: ignore
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-dialogflowcx",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class FlowsTransport(abc.ABC):
"""Abstract transport class for Flows."""
AUTH_SCOPES = (
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/dialogflow",
)
DEFAULT_HOST: str = "dialogflow.googleapis.com"
    def __init__(
        self,
        *,
        host: str = DEFAULT_HOST,
        credentials: ga_credentials.Credentials = None,
        credentials_file: Optional[str] = None,
        scopes: Optional[Sequence[str]] = None,
        quota_project_id: Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        always_use_jwt_access: Optional[bool] = False,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]):
                 The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scopes (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
            always_use_jwt_access (Optional[bool]): Whether self signed JWT should
                be used for service account credentials.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host
        # Explicit scopes take precedence; AUTH_SCOPES is the fallback.
        scopes_kwargs = {"scopes": scopes, "default_scopes": self.AUTH_SCOPES}
        # Save the scopes.
        self._scopes = scopes
        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise core_exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )
        if credentials_file is not None:
            credentials, _ = google.auth.load_credentials_from_file(
                credentials_file, **scopes_kwargs, quota_project_id=quota_project_id
            )
        elif credentials is None:
            # Fall back to Application Default Credentials.
            credentials, _ = google.auth.default(
                **scopes_kwargs, quota_project_id=quota_project_id
            )
        # If the credentials are service account credentials, then always try to use self signed JWT.
        if (
            always_use_jwt_access
            and isinstance(credentials, service_account.Credentials)
            and hasattr(service_account.Credentials, "with_always_use_jwt_access")
        ):
            credentials = credentials.with_always_use_jwt_access(True)
        # Save the credentials.
        self._credentials = credentials
    def _prep_wrapped_messages(self, client_info):
        # Precompute the wrapped methods.
        # Wrapping attaches default timeout/retry behaviour and the
        # user-agent metadata from client_info to each RPC handle.
        self._wrapped_methods = {
            self.create_flow: gapic_v1.method.wrap_method(
                self.create_flow, default_timeout=None, client_info=client_info,
            ),
            self.delete_flow: gapic_v1.method.wrap_method(
                self.delete_flow, default_timeout=None, client_info=client_info,
            ),
            self.list_flows: gapic_v1.method.wrap_method(
                self.list_flows, default_timeout=None, client_info=client_info,
            ),
            self.get_flow: gapic_v1.method.wrap_method(
                self.get_flow, default_timeout=None, client_info=client_info,
            ),
            self.update_flow: gapic_v1.method.wrap_method(
                self.update_flow, default_timeout=None, client_info=client_info,
            ),
            self.train_flow: gapic_v1.method.wrap_method(
                self.train_flow, default_timeout=None, client_info=client_info,
            ),
            self.validate_flow: gapic_v1.method.wrap_method(
                self.validate_flow, default_timeout=None, client_info=client_info,
            ),
            self.get_flow_validation_result: gapic_v1.method.wrap_method(
                self.get_flow_validation_result,
                default_timeout=None,
                client_info=client_info,
            ),
            self.import_flow: gapic_v1.method.wrap_method(
                self.import_flow, default_timeout=None, client_info=client_info,
            ),
            self.export_flow: gapic_v1.method.wrap_method(
                self.export_flow, default_timeout=None, client_info=client_info,
            ),
        }
    def close(self):
        """Closes resources associated with the transport.

        .. warning::
             Only call this method if the transport is NOT shared
             with other clients - this may cause errors in other clients!
        """
        raise NotImplementedError()
    # The properties below are abstract RPC handles; concrete transport
    # subclasses (e.g. gRPC) override them with real callables.
    @property
    def operations_client(self):
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()
    @property
    def create_flow(
        self,
    ) -> Callable[
        [gcdc_flow.CreateFlowRequest], Union[gcdc_flow.Flow, Awaitable[gcdc_flow.Flow]]
    ]:
        raise NotImplementedError()
    @property
    def delete_flow(
        self,
    ) -> Callable[
        [flow.DeleteFlowRequest], Union[empty_pb2.Empty, Awaitable[empty_pb2.Empty]]
    ]:
        raise NotImplementedError()
    @property
    def list_flows(
        self,
    ) -> Callable[
        [flow.ListFlowsRequest],
        Union[flow.ListFlowsResponse, Awaitable[flow.ListFlowsResponse]],
    ]:
        raise NotImplementedError()
    @property
    def get_flow(
        self,
    ) -> Callable[[flow.GetFlowRequest], Union[flow.Flow, Awaitable[flow.Flow]]]:
        raise NotImplementedError()
    @property
    def update_flow(
        self,
    ) -> Callable[
        [gcdc_flow.UpdateFlowRequest], Union[gcdc_flow.Flow, Awaitable[gcdc_flow.Flow]]
    ]:
        raise NotImplementedError()
    @property
    def train_flow(
        self,
    ) -> Callable[
        [flow.TrainFlowRequest],
        Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
    ]:
        raise NotImplementedError()
    @property
    def validate_flow(
        self,
    ) -> Callable[
        [flow.ValidateFlowRequest],
        Union[flow.FlowValidationResult, Awaitable[flow.FlowValidationResult]],
    ]:
        raise NotImplementedError()
    @property
    def get_flow_validation_result(
        self,
    ) -> Callable[
        [flow.GetFlowValidationResultRequest],
        Union[flow.FlowValidationResult, Awaitable[flow.FlowValidationResult]],
    ]:
        raise NotImplementedError()
@property
def import_flow(
self,
) -> Callable[
[flow.ImportFlowRequest],
Union[operations_pb2.Operation, Awaitable[operations_pb2.Operation]],
]:
raise NotImplementedErr |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import zmq
from zmq.eventloop import ioloop as ioloop_mod
import zmqdecorators
import time
SERVICE_NAME = "urpobot.motor"
SERVICE_PORT = 7575
SIGNALS_PORT = 7576
# How long to wait for new commands before stopping automatically
COMMAND_GRACE_TIME = 0.250
class motorserver(zmqdecorators.service):
    """ZMQ service that relays speed commands to a motor-controller board
    over a serial port, stopping the motors automatically when commands
    stop arriving within COMMAND_GRACE_TIME."""
    def __init__(self, service_name, service_port, serialport):
        super(motorserver, self).__init__(service_name, service_port)
        self.serial_port = serialport
        self.input_buffer = ""
        # Fire handle_serial_event whenever the serial fd becomes readable.
        self.evthandler = ioloop_mod.IOLoop.instance().add_handler(self.serial_port.fileno(), self.handle_serial_event, ioloop_mod.IOLoop.instance().READ)
        self.last_command_time = time.time()
        # Watchdog: periodically stop the motors if no recent command.
        self.pcb = ioloop_mod.PeriodicCallback(self.check_data_reveived, COMMAND_GRACE_TIME)
        self.pcb.start()
    def check_data_reveived(self, *args):
        # NOTE(review): name is a typo of "received"; kept as-is since it
        # is registered as the periodic callback above.
        if (time.time() - self.last_command_time > COMMAND_GRACE_TIME):
            self._setspeeds(0,0)
    def _setspeeds(self, m1speed, m2speed):
        # Wire format: "S" + two 16-bit speeds as zero-padded hex + newline.
        self.serial_port.write("S%04X%04X\n" % ((m1speed & 0xffff), (m2speed & 0xffff)))
    @zmqdecorators.method()
    def setspeeds(self, resp, m1speed, m2speed):
        # Remote method: refresh the watchdog timestamp, then forward.
        self.last_command_time = time.time()
        #print("Got speeds %s,%s" % (m1speed, m2speed))
        self._setspeeds(m1speed, m2speed)
        # TODO: actually handle ACK/NACK somehow (we need to read it from the serialport but we can't block while waiting for it...)
        resp.send("ACK")
    def handle_serial_event(self, fd, events):
        # Copied from arbus that was thread based
        if not self.serial_port.inWaiting():
            # Don't try to read if there is no data, instead sleep (yield) a bit
            time.sleep(0)
            return
        data = self.serial_port.read(1)
        if len(data) == 0:
            return
        #print("DEBUG: data=%s" % data)
        # Put the data into input buffer and check for CRLF
        self.input_buffer += data
        # Trim prefix NULLs and linebreaks
        self.input_buffer = self.input_buffer.lstrip(chr(0x0) + "\r\n")
        #print "input_buffer=%s" % repr(self.input_buffer)
        if ( len(self.input_buffer) > 1
             and self.input_buffer[-2:] == "\r\n"):
            # Got a message, parse it (sans the CRLF) and empty the buffer
            self.message_received(self.input_buffer[:-2])
            self.input_buffer = ""
    def message_received(self, message):
        #print("DEBUG: msg=%s" % message)
        try:
            # Currently we have no incoming messages from this board
            pass
        except Exception as e:
            print "message_received exception: Got exception %s" % repr(e)
            # Ignore indexerrors, they just mean we could not parse the command
            pass
        pass
    def cleanup(self):
        # Safety: make sure the motors are stopped on shutdown.
        print("Cleanup called")
        self._setspeeds(0,0)
    def run(self):
        print("Starting motorserver")
        super(motorserver, self).run()
if __name__ == "__main__":
    # Entry point: expects the serial device path as the first CLI argument.
    import serial
    import sys,os
    serial_link = serial.Serial(sys.argv[1], 115200, xonxoff=False, timeout=0.01)
    server = motorserver(SERVICE_NAME, SERVICE_PORT, serial_link)
    server.run()
|
# Include the Dropbox SDK
import dropbox
class DropBoxSDK():
    """Thin wrapper around the Dropbox OAuth2 no-redirect flow."""

    # Get your app key and secret from the Dropbox developer website
    app_key = ''
    app_secret = ''

    def authorize(self):
        """Run the interactive OAuth2 flow and return an authorized client.

        Prompts the user to visit the authorization URL, paste back the
        code, and exchanges it for an access token.
        """
        flow = dropbox.client.DropboxOAuth2FlowNoRedirect(self.app_key, self.app_secret)
        # Have the user sign in and authorize this token.
        # Fix: flow.start() was previously called twice in a row; a single
        # call is sufficient to obtain the authorization URL.
        authorize_url = flow.start()
        # Parenthesized prints work identically on Python 2 and 3.
        print('1. Go to: ' + authorize_url)
        print('2. Click "Allow" (you might have to log in first)')
        print('3. Copy the authorization code.')
        code = raw_input("Enter the authorization code here: ").strip()
        # This will fail if the user enters an invalid authorization code
        self.access_token, user_id = flow.finish(code)
        client = dropbox.client.DropboxClient(self.access_token)
        return client
|
from setuptools import setup, Extension
from setuptools.command.build_ext import build_ext
import os.path
import subprocess
import sys
# Path to the vendored libpg_query sources (built with `make` below and
# used as both include and library directory for the extension).
libpg_query = os.path.join('.', 'libpg_query')
class PSqlParseBuildExt(build_ext):
    """build_ext that compiles the bundled libpg_query before the extension."""

    def run(self):
        # The extension links against libpg_query, so build it first.
        status = subprocess.call(['make', '-C', libpg_query, 'build'])
        if status:
            sys.stderr.write('''
An error occurred during extension building.
Make sure you have bison and flex installed on your system.
''')
            sys.exit(status)
        build_ext.run(self)
# Build from the pre-generated C source by default; set USE_CYTHON in the
# environment to regenerate it from the .pyx with Cython instead.
USE_CYTHON = bool(os.environ.get('USE_CYTHON'))
ext = '.pyx' if USE_CYTHON else '.c'
libraries = ['pg_query']
extensions = [
    Extension('psqlparse.parser',
              ['psqlparse/parser' + ext],
              libraries=libraries,
              include_dirs=[libpg_query],
              library_dirs=[libpg_query])
]
if USE_CYTHON:
    from Cython.Build import cythonize
    extensions = cythonize(extensions)
setup(name='psqlparse',
      version='1.0-rc7',
      url='https://github.com/alculquicondor/psqlparse',
      author='Aldo Culquicondor',
      author_email='aldo@amigocloud.com',
      description='Parse SQL queries using the PostgreSQL query parser',
      install_requires=['six'],
      license='BSD',
      cmdclass={'build_ext': PSqlParseBuildExt},
      packages=['psqlparse', 'psqlparse.nodes'],
      ext_modules=extensions)
|
import sys
import gc
import pygame
from pygame.locals import *
from input import *
import snd
TICKS_PER_SECOND = 25
# Milliseconds per game tick. Floor division keeps this an int on both
# Python 2 and Python 3 (plain `/` would yield a float on Python 3,
# silently changing tick arithmetic in the game loop).
GAMETICKS = 1000 // TICKS_PER_SECOND

def set_game_speed( slowdown ):
    """Scale the tick rate by *slowdown* and recompute the tick length.

    Updates the module-level TICKS_PER_SECOND and GAMETICKS globals that
    the Game loop reads.
    """
    global TICKS_PER_SECOND
    global GAMETICKS
    TICKS_PER_SECOND = int( 25 * slowdown )
    GAMETICKS = 1000 // TICKS_PER_SECOND
class Game:
    """Pygame application skeleton: init, fixed-timestep loop, teardown.

    Subclasses implement do_tick() and render(); the loop runs game ticks
    at TICKS_PER_SECOND and renders with interpolation between ticks.
    """

    def __init__( self, name, configuration ):
        self.config = configuration
        self.name = name

    def init_pygame( self ):
        """Initialize sound, display, window caption and input handling."""
        # Sound is pre-initialized before pygame.init().
        snd.pre_init()
        # Init the display
        pygame.init()
        self.userinput = UserInput()
        if not self.config.is_fullscreen:
            pygame.display.set_mode( self.config.resolution )
        else:
            pygame.display.set_mode( self.config.resolution, pygame.FULLSCREEN )
        pygame.display.set_caption( self.name )
        # Init the input
        pygame.mouse.set_visible( False )
        pygame.event.set_grab( False )
        snd.init()

    def deinit_pygame( self ):
        """Shut down sound and pygame."""
        snd.deinit()
        pygame.quit()

    def before_gameloop( self ):
        # Subclass hook: runs once before the main loop starts.
        pass

    def after_gameloop( self ):
        # Subclass hook: runs once after the main loop exits.
        pass

    def run( self ):
        """Run the main loop until self.game_is_done becomes True."""
        try:
            self.init_pygame()
            self.before_gameloop()
            self.fps = 0
            frame_count = 0
            next_game_tick = pygame.time.get_ticks()
            next_half_second = pygame.time.get_ticks()
            # main loop
            self.game_is_done = False
            while not self.game_is_done:
                # events
                self.handle_events()
                # game tick: catch up on pending fixed-size ticks, but at
                # most 4 in a row so a slow machine never spirals.
                loop_count = 0
                while pygame.time.get_ticks() > next_game_tick and loop_count < 4:
                    x, y = pygame.mouse.get_pos()
                    self.userinput.mouse.feed_pos( Vec2D(x, y) )
                    self.do_tick( self.userinput )
                    self.userinput.update()
                    next_game_tick += GAMETICKS
                    loop_count += 1
##                    gc.collect()
                if loop_count >= 4: # don't overdo the ticks
                    next_game_tick = pygame.time.get_ticks()
                # render, interpolating between the previous and next tick
                time_sec = pygame.time.get_ticks() * 0.001
                interpol = 1 - ((next_game_tick - pygame.time.get_ticks()) / float(GAMETICKS))
                self.render(pygame.display.get_surface(), interpol, time_sec )
                pygame.display.flip()
                frame_count += 1
                # refresh the FPS estimate twice per second
                if pygame.time.get_ticks() > next_half_second:
                    self.fps = 2 * frame_count
                    frame_count = 0
                    next_half_second += 500
            self.after_gameloop()
            self.deinit_pygame()
        except:
            # Tear pygame down before propagating so the window closes.
            self.deinit_pygame()
            # Fix: was a Python 2-only print statement; %-formatting keeps
            # the output identical while parsing on both Python 2 and 3.
            print("Unexpected error: %s" % sys.exc_info()[0])
            raise

    def handle_events( self ):
        """Drain the pygame event queue and feed events to self.userinput."""
        for event in pygame.event.get():
            if event.type == QUIT:
                self.game_is_done = True
            elif event.type == KEYDOWN:
                self.userinput.key.feed_down( event.key )
                self.userinput.key.feed_char( event.unicode )
            elif event.type == KEYUP:
                self.userinput.key.feed_up( event.key )
            elif event.type == MOUSEBUTTONDOWN:
                self.userinput.mouse.feed_down( event.button )
                # NOTE(review): self.state is never assigned in this class;
                # presumably a subclass sets it before events arrive -- confirm.
                self.state.mouse_down( event.button )
            elif event.type == MOUSEBUTTONUP:
                self.userinput.mouse.feed_up( event.button )
            elif event.type == JOYBUTTONDOWN:
                self.userinput.joys[event.joy].feed_down( event.button )
            elif event.type == JOYBUTTONUP:
                self.userinput.joys[event.joy].feed_up( event.button )

    def draw_fps( self, surface ):
        """Blit the current FPS counter near the top-left of *surface*."""
        font = pygame.font.Font( None, 20 )
        render_text = font.render( str(self.fps), 0, (255,255,255) )
        surface.blit( render_text, (10,10) )
|
#!/usr/bin/env python
import errno
import os
import re
import sys
from configparser import ConfigParser
import jinja2
from errbot.version import VERSION
def new_plugin_wizard(directory=None):
    """
    Start the wizard to create a new plugin in the current working directory.

    :param directory: target directory for the generated files; defaults
        to the current working directory.
    """
    if directory is None:
        print("This wizard will create a new plugin for you in the current directory.")
        directory = os.getcwd()
    else:
        print(f'This wizard will create a new plugin for you in "{directory}".')

        if os.path.exists(directory) and not os.path.isdir(directory):
            print(f'Error: The path "{directory}" exists but it isn\'t a directory')
            sys.exit(1)

    name = ask(
        "What should the name of your new plugin be?",
        validation_regex=r"^[a-zA-Z][a-zA-Z0-9 _-]*$",
    ).strip()
    # Derive the module/file/class identifiers from the human-readable name.
    module_name = name.lower().replace(" ", "_")
    # NOTE(review): directory_name is currently unused below -- confirm.
    directory_name = name.lower().replace(" ", "-")
    class_name = "".join([s.capitalize() for s in name.lower().split(" ")])
    description = ask(
        "What may I use as a short (one-line) description of your plugin?"
    )
    python_version = "3"
    errbot_min_version = ask(
        f"Which minimum version of errbot will your plugin work with? "
        f"Leave blank to support any version or input CURRENT to select "
        f"the current version {VERSION}."
    ).strip()
    if errbot_min_version.upper() == "CURRENT":
        errbot_min_version = VERSION
    errbot_max_version = ask(
        f"Which maximum version of errbot will your plugin work with? "
        f"Leave blank to support any version or input CURRENT to select "
        f"the current version {VERSION}."
    ).strip()
    if errbot_max_version.upper() == "CURRENT":
        errbot_max_version = VERSION
    # Assemble the .plug metadata file (INI format via ConfigParser).
    plug = ConfigParser()
    plug["Core"] = {
        "Name": name,
        "Module": module_name,
    }
    plug["Documentation"] = {
        "Description": description,
    }
    plug["Python"] = {
        "Version": python_version,
    }
    # The [Errbot] section is only written when a version bound was given.
    if errbot_max_version != "" or errbot_min_version != "":
        plug["Errbot"] = {}
        if errbot_min_version != "":
            plug["Errbot"]["Min"] = errbot_min_version
        if errbot_max_version != "":
            plug["Errbot"]["Max"] = errbot_max_version
    plugin_path = directory
    plugfile_path = os.path.join(plugin_path, module_name + ".plug")
    pyfile_path = os.path.join(plugin_path, module_name + ".py")
    try:
        os.makedirs(plugin_path, mode=0o700)
    except IOError as e:
        # An already-existing directory is fine; anything else is fatal.
        # (IOError is an alias of OSError on Python 3, so makedirs errors
        # are caught here.)
        if e.errno != errno.EEXIST:
            raise
    # Refuse to silently clobber an existing plugin of the same name.
    if os.path.exists(plugfile_path) or os.path.exists(pyfile_path):
        path = os.path.join(directory, f"{module_name}.{{py,plug}}")
        ask(
            f"Warning: A plugin with this name was already found at {path}\n"
            f"If you continue, these will be overwritten.\n"
            f'Press Ctrl+C to abort now or type in "overwrite" to confirm overwriting of these files.',
            valid_responses=["overwrite"],
        )
    with open(plugfile_path, "w") as f:
        plug.write(f)
    with open(pyfile_path, "w") as f:
        # render_plugin picks the names it needs out of locals().
        f.write(render_plugin(locals()))
    print(f"Success! You'll find your new plugin at '{plugfile_path}'")
    print(
        "(Don't forget to include a LICENSE file if you are going to publish your plugin)."
    )
def ask(question, valid_responses=None, validation_regex=None):
    """
    Ask the user for some input. If valid_responses is supplied, the user
    must respond with something present in this list.

    :param question: prompt printed before reading input.
    :param valid_responses: optional list of acceptable answers.
    :param validation_regex: optional regex the answer must match
        (checked with re.search).
    :return: the accepted response string.
    :raises TypeError: if valid_responses is not a list.
    """
    response = None
    print(question)
    while True:
        response = input("> ")
        if valid_responses is not None:
            # Fix: was `assert isinstance(...)` -- asserts are stripped
            # under `python -O`, silently disabling validation; raise an
            # explicit error instead.
            if not isinstance(valid_responses, list):
                raise TypeError("valid_responses must be a list")
            if response in valid_responses:
                break
            else:
                print(f'Bad input: Please answer one of: {", ".join(valid_responses)}')
        elif validation_regex is not None:
            m = re.search(validation_regex, response)
            if m is None:
                print(
                    f"Bad input: Please respond with something matching this regex: {validation_regex}"
                )
            else:
                break
        else:
            break
    return response
def render_plugin(values):
    """
    Render the Jinja template for the plugin with the given values.
    """
    # Templates live alongside this module in a "templates" directory.
    template_dir = os.path.join(os.path.dirname(__file__), "templates")
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir),
        auto_reload=False,
        keep_trailing_newline=True,
        autoescape=True,
    )
    return env.get_template("new_plugin.py.tmpl").render(**values)
if __name__ == "__main__":
    try:
        new_plugin_wizard()
    except KeyboardInterrupt:
        # Treat Ctrl+C as a clean abort instead of dumping a traceback.
        sys.exit(1)
|
#! /usr/bin/env python
import logging, logtool
from .page import Page
from .xlate_frame import XlateFrame
LOG = logging.getLogger (__name__)
class Contents:
    """Render a collection of objects onto pages of the canvas."""

    @logtool.log_call
    def __init__ (self, canvas, objects):
        self.canvas = canvas
        self.objects = objects

    @logtool.log_call
    def render (self):
        """Lay out each object on the page and render it in its own frame."""
        with Page (self.canvas) as page:
            for item in self.objects:
                # Ask the page for the next slot sized for this asset.
                slot = page.next (item.asset)
                with XlateFrame (self.canvas, item.tile_type, *slot,
                                 inset_by = "margin"):
                    # print ("Obj: ", item.asset)
                    item.render ()
|
from PyQt5 import QtWidgets, uic, QtCore
import sys
class MainWindow(QtWidgets.QMainWindow):
    """Main GUI window for the hybrid test stand.

    Loads the Qt Designer layout, wires every button's clicked signal to
    a like-named handler, and keeps a set of state_* flags that mirror
    what the user (or the server) last set.
    """

    def __init__(self, *args, **kwargs):
        super(MainWindow, self).__init__(*args, **kwargs)
        # uic loads the Designer layout; the .ui file must sit in the
        # same directory as this script.
        uic.loadUi('hybrid_test_gui.ui', self)
        # Connect every button to its handler method.
        self.setup_buttons_etc()
        ## STATES ##
        # Updated by the GUI when the user clicks a button or when the
        # server changes something.
        self.state_connected = False
        self.state_igniter = False
        self.state_MEV = False
        self.state_N2OV = False
        self.state_N2O = False
        self.state_N2 = False
        self.state_NCV = False
        self.state_RV = False
        self.state_VV = False
        self.state_abort = False
        self.state_run = False

    def setup_buttons_etc(self):
        # Qt signals/slots: each QPushButton's "clicked" signal is hooked
        # to the handler whose name is the button name with a leading
        # underscore (button_name -> _button_name).
        #
        # To add a new button:
        #   * add it to the GUI in Qt Designer and give it a nice name,
        #   * append the name to the tuple below,
        #   * define a matching "_name" method on this class.
        button_names = (
            'connect_btn', 'disconnect_btn', 'igniter_btn_toggle',
            'MEV_btn_off', 'MEV_btn_on',
            'N2OV_btn_off', 'N2OV_btn_on',
            'N2O_btn_off', 'N2O_btn_on',
            'N2_btn_off', 'N2_btn_on',
            'NCV_btn_off', 'NCV_btn_on',
            'RV_btn_off', 'RV_btn_on',
            'VV_btn_off', 'VV_btn_on',
            'abort_btn', 'run_btn',
        )
        for name in button_names:
            getattr(self, name).clicked.connect(getattr(self, '_' + name))

    def _connect_btn(self):
        self.state_connected = True
        print(self.state_connected)

    def _disconnect_btn(self):
        self.state_connected = False
        print(self.state_connected)

    def _igniter_btn_toggle(self):
        print(self.state_igniter)

    def _MEV_btn_off(self):
        print(self.state_MEV)

    def _MEV_btn_on(self):
        print(self.state_MEV)

    def _N2OV_btn_off(self):
        print(self.state_N2OV)

    def _N2OV_btn_on(self):
        print(self.state_N2OV)

    def _N2O_btn_off(self):
        print(self.state_N2O)

    def _N2O_btn_on(self):
        print(self.state_N2O)

    def _N2_btn_off(self):
        print(self.state_N2)

    def _N2_btn_on(self):
        print(self.state_N2)

    def _NCV_btn_off(self):
        print(self.state_NCV)

    def _NCV_btn_on(self):
        print(self.state_NCV)

    def _RV_btn_off(self):
        print(self.state_RV)

    def _RV_btn_on(self):
        print(self.state_RV)

    def _VV_btn_off(self):
        print(self.state_VV)

    def _VV_btn_on(self):
        print(self.state_VV)

    def _abort_btn(self):
        print(self.state_abort)

    def _run_btn(self):
        print(self.state_run)

    def send_to_server(self):
        print("")
def main():
    """Create the Qt application, show the main window, run the event loop."""
    app = QtWidgets.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())

if __name__ == '__main__':
    main()
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import yaml
from ansible.module_utils.six import PY3
from ansible.parsing.yaml.objects import AnsibleUnicode, AnsibleSequence, AnsibleMapping, AnsibleVaultEncryptedUnicode
from ansible.utils.unsafe_proxy import AnsibleUnsafeText
from ansible.vars.hostvars import HostVars, HostVarsVars
class AnsibleDumper(yaml.SafeDumper):
    '''
    A simple stub class that allows us to add representers
    for our overridden object types.
    '''
    # Representers for Ansible's wrapper types are attached below via
    # AnsibleDumper.add_representer(...).
    pass
def represent_hostvars(self, data):
    # Materialize the HostVars mapping into a plain dict so the
    # SafeRepresenter can serialize it like any other mapping.
    plain = dict(data)
    return self.represent_dict(plain)
# Note: only want to represent the encrypted data
def represent_vault_encrypted_unicode(self, data):
    # Emit only the ciphertext, tagged !vault, as a literal block scalar.
    ciphertext = data._ciphertext.decode()
    return self.represent_scalar(u'!vault', ciphertext, style='|')
# AnsibleUnicode subclasses str on Python 3, so the plain str representer
# applies; Python 2 needs the unicode representer instead.
if PY3:
    represent_unicode = yaml.representer.SafeRepresenter.represent_str
else:
    represent_unicode = yaml.representer.SafeRepresenter.represent_unicode
# Register representers so yaml.dump(..., Dumper=AnsibleDumper) serializes
# Ansible's wrapper types as their plain YAML equivalents.
AnsibleDumper.add_representer(
    AnsibleUnicode,
    represent_unicode,
)
AnsibleDumper.add_representer(
    AnsibleUnsafeText,
    represent_unicode,
)
AnsibleDumper.add_representer(
    HostVars,
    represent_hostvars,
)
AnsibleDumper.add_representer(
    HostVarsVars,
    represent_hostvars,
)
AnsibleDumper.add_representer(
    AnsibleSequence,
    yaml.representer.SafeRepresenter.represent_list,
)
AnsibleDumper.add_representer(
    AnsibleMapping,
    yaml.representer.SafeRepresenter.represent_dict,
)
AnsibleDumper.add_representer(
    AnsibleVaultEncryptedUnicode,
    represent_vault_encrypted_unicode,
)
|
#
# This file is part of Invenio.
# Copyright (C) 2016, 2017 CERN.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suit | e 330, Boston,
# MA 02111-1307, USA.
#
# In applying this license, CERN does not
# waive the privileges and immunities granted to it by virtue of its status
# as an Intergovernmental Organization | or submit itself to any jurisdiction.
"""Create access branch."""
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = '67ba0de65fbb'
# First revision of this branch, hence no parent revision.
down_revision = None
branch_labels = (u'invenio_access', )
# Revision (from another branch) that must be applied first.
depends_on = 'dbdbc1b19cf2'
def upgrade():
    """Upgrade database."""
    # Intentionally empty: this revision only creates the version branch
    # declared by branch_labels; no schema changes are made.
def downgrade():
    """Downgrade database."""
    # Intentionally empty: the branch-creation revision has no schema
    # changes to revert.
|
imp | ort angr
######################################
# socket
######################################
class socket(angr.SimProcedure):
    """SimProcedure model of the socket(2) syscall."""
    #pylint:disable=arguments-differ
    def run(self, domain, typ, protocol):
        # Concretize the (possibly symbolic) arguments before use.
        conc_domain = self.state.solver.eval(domain)
        conc_typ = self.state.solver.eval(typ)
        conc_protocol = self.state.solver.eval(protocol)
        # Raw sockets require root: mirror the kernel's EPERM failure.
        if self.state.posix.uid != 0 and conc_typ == 3: # SOCK_RAW
            return self.state.libc.ret_errno('EPERM')
        # Per-state counter so repeated socket() calls with identical
        # arguments still get distinct socket identities.
        nonce = self.state.globals.get('socket_counter', 0) + 1
        self.state.globals['socket_counter'] = nonce
        fd = self.state.posix.open_socket(('socket', conc_domain, conc_typ, conc_protocol, nonce))
        return fd
|
sfh_tests
import ResolvedStellarPops as rsp
from TPAGBparams import snap_src
import galaxy_tests
def ms_color_cut():
    """Build a dict of per-simulation maximum MS/BHeB magnitudes per filter.

    Uses two constant-SFR TRILEGAL simulations (two metallicities) and
    only counts stars brighter than the faintest 90%-completeness
    magnitude from the completeness table. (Python 2 module.)
    """
    comp90 = sfh_tests.read_completeness_table(absmag=True)
    tri_dir = os.environ['TRILEGAL_ROOT']
    # make these simulations by editing a constant sf trilegal file
    # using tab_sfr/ as a source of templates.
    sgals = [rsp.Galaxies.simgalaxy(tri_dir + 'const_sfr_out_z06.dat',
                                    filter1='F606W', filter2='F814W'),
             rsp.Galaxies.simgalaxy(tri_dir + 'const_sfr_out_z0006.dat',
                                    filter1='F606W', filter2='F814W')]
    dline = {}
    for band in ['opt', 'ir']:
        if band == 'opt':
            filter2 = 'F814W'
        else:
            filter2 = 'F160W'
        for sgal in sgals:
            sgal.all_stages()
            if not sgal.name in dline.keys():
                dline[sgal.name] = {}
            # only consider MS stars that are brighter than the faintest
            # 90% completeness mag in the sample
            iblue, = np.nonzero(sgal.data.get_col(filter2) <
                                np.max(comp90['%s_filter2' % band]))
            # Keep main-sequence and blue He-burning stars.
            ims = list(set(iblue) & set(sgal.ims))
            ibheb = list(set(iblue) & set(sgal.ibheb))
            ims = list(np.concatenate([ims, ibheb]))
            if len(ims) == 0:
                # could be an issue that no MS stars are around...
                print 'warning', filter2, 'no MS found.'
                continue
            if band == 'opt':
                dline[sgal.name]['F606W'] = np.max(sgal.data.get_col('F606W')[ims])
                dline[sgal.name]['F814W'] = np.max(sgal.data.get_col('F814W')[ims])
                dline[sgal.name]['F475W'] = np.max(sgal.data.get_col('F475W')[ims])
            else:
                dline[sgal.name]['F110W'] = np.max(sgal.data.get_col('F110W')[ims])
                dline[sgal.name]['F160W'] = np.max(sgal.data.get_col('F160W')[ims])
    return dline
def color_cut_per_galaxy(table='default'):
    """Print a per-target, per-band color cut from the simulated MS mags.

    Converts the absolute magnitudes from ms_color_cut() to apparent
    magnitudes for each target and adds the photometric color
    uncertainty. (Python 2 module.)
    """
    ms_dict = ms_color_cut()
    comp90 = sfh_tests.read_completeness_table(table=table, absmag=True)
    comp90_uncert = sfh_tests.read_completeness_table(table=table, uncertainties=True)
    #if table == 'default':
    #    table = snap_src + '/tables/completeness_0.90.dat'
    #table = table.replace('.dat', '_colorcuts.dat')
    photsys = 'wfc3snap'
    #fmt = '%(target)s %(opt_colorcut).3f %(ir_colorcut).3f \n'
    for sgalname, dline in ms_dict.items():
        print sgalname
        for i, target in enumerate(comp90['target']):
            for band in ['opt', 'ir']:
                if band == 'opt':
                    # Optical filter1 depends on the target; filter2 is fixed.
                    filter1 = sfh_tests.get_filter1(target.lower())
                    filter2 = 'F814W'
                else:
                    filter1 = 'F110W'
                    filter2 = 'F160W'
                m2m = {'target': target, 'filter2': filter2, 'filter1': filter1}
                if not filter1 in dline.keys():
                    continue
                Mag1 = dline[filter1]
                Mag2 = dline[filter2]
                # Absolute -> apparent magnitudes for this target.
                mag1 = rsp.astronomy_utils.Mag2mag(Mag1, filter1, photsys, **m2m)
                mag2 = rsp.astronomy_utils.Mag2mag(Mag2, filter2, photsys, **m2m)
                color = mag1 - mag2
                color_uncert = comp90_uncert[i]['%s_color' % band]
                print target, filter1, filter2, '%.2f' % (color+color_uncert)
def find_contamination_by_phases(output_files=None):
    """Run the contamination-by-phases diagnostic for each varysfh output.

    :param output_files: list of TRILEGAL output files; defaults to a
        hard-coded subset of SNAP targets (several commented out).
    """
    if output_files is None:
        output_files = [ snap_src + '/models/varysfh/ddo71/caf09_s_nov13/mc/output_ddo71_caf09_s_nov13.dat',
                        #snap_src + '/models/varysfh/ddo78/caf09_s_nov13/mc/output_ddo78_caf09_s_nov13.dat',
                        snap_src + '/models/varysfh/hs117/caf09_s_nov13/mc/output_hs117_caf09_s_nov13.dat',
                        #snap_src + '/models/varysfh/kdg73/caf09_s_nov13/mc/output_kdg73_caf09_s_nov13.dat',
                        snap_src + '/models/varysfh/kkh37/caf09_s_nov13/mc/output_kkh37_caf09_s_nov13.dat']#,
                        #snap_src + '/models/varysfh/ngc2976-deep/caf09_s_nov13/mc/output_ngc2976-deep_caf09_s_nov13.dat',
                        #snap_src + '/models/varysfh/ngc404/caf09_s_nov13/mc/output_ngc404_caf09_s_nov13.dat']
    for output_file in output_files:
        # Target name is embedded in the file name: output_<target>_... .dat
        target = output_file.split('output_')[1].split('_')[0]
        print target
        filter1 = sfh_tests.get_filter1(target)
        ds = sfh_tests.Diagnostics(VarySFH_kw={'target': target})
        ds.mc = False
        sgal = rsp.Galaxies.simgalaxy(output_file, filter1=filter1,
                                      filter2='F814W')
        sgal.target = target
        # dry_run=True: normalization numbers only, no new TRILEGAL run.
        sopt_rgb, sopt_agb, sir_rgb, sir_agb = \
            ds.do_normalization(filter1=filter1, trilegal_output=output_file,
                                hist_it_up=False, dry_run=True)
        ds.contamination_by_phases(sopt_rgb, sopt_agb, sir_rgb, sir_agb)
    return
def completeness_table_absmag(table='default'):
    '''
    convert the completeness table mags to abs mag.
    outfile = [table]_absmag.dat

    Reads the 90% completeness table, converts each opt/ir filter value
    with rsp.astronomy_utils.mag2Mag, and writes the converted table next
    to the input one. (Python 2 module.)
    '''
    comp90 = sfh_tests.read_completeness_table(table)
    if table == 'default':
        table = snap_src + '/tables/completeness_0.90.dat'
    table = table.replace('.dat', '_absmag.dat')
    photsys = 'wfc3snap'
    fmt = '%(target)s %(opt_filter1).3f %(opt_filter2).3f %(ir_filter1).3f %(ir_filter2).3f \n'
    with open(table, 'w') as out:
        for i, target in enumerate(comp90['target']):
            dline = {'target': target}
            for band in ['opt', 'ir']:
                if band == 'opt':
                    # Optical filter1 depends on the target; filter2 is fixed.
                    filter1 = sfh_tests.get_filter1(target.lower())
                    filter2 = 'F814W'
                else:
                    filter1 = 'F110W'
                    filter2 = 'F160W'
                m2m = {'target': target, 'filter2': filter2, 'filter1': filter1}
                compf1 = comp90[i]['%s_filter1' % band]
                dline['%s_filter1' % band] = rsp.astronomy_utils.mag2Mag(compf1, filter1, photsys, **m2m)
                compf2 = comp90[i]['%s_filter2' % band]
                dline['%s_filter2' % band] = rsp.astronomy_utils.mag2Mag(compf2, filter2, photsys, **m2m)
            # One row per target once both bands are filled in.
            out.write(fmt % dline)
    print 'wrote %s' % table
def uncertainties_at_completeness(table='default', binwidth=0.1):
'''
write a table with the median uncertainties around the completeness value
from the completeness table.
'''
comp90 = sfh_tests.read_completeness_table(table)
if table == 'default':
table = snap_src + '/tables/completeness_0.90.dat'
table = table.replace('.dat', '_uncertainties.dat')
opt_fits_src = snap_src + '/data/angst_no_trim'
fmt = '%(target)s %(opt_filter1).3f %(opt_filter2).3f %(opt_color).3f %(ir_filter1).3f %(ir_filter2).3f %(ir_color).3f \n'
title = '# ' + fmt.replace('%','').replace(')', '').replace('.3f','').replace('s','').replace('(','')
with open(table, 'w') as out:
out.write('# median uncertainty within +/-%.2f of completeness mag\n' % (binwidth/2))
out.write(title)
for i, target in enumerate(comp90['target']):
ir_gal = galaxy_tests.load_galaxy(target, band='ir')
opt_gal = galaxy_tests.load_galaxy(target, band='opt',
fits_src=opt_fits_src)
dline = {'target': target}
for band, gal in zip(['opt', 'ir'], [opt_gal, ir_gal]):
key = '%s_filter1' % band
uncerts1, = np.nonzero((gal.mag1 < comp90[i][key] + binwidth/2) &
(gal.mag1 > comp90[i][key] - binwidth/2))
med_unct1 = np.median(gal.data.MAG1_ERR[uncerts1])
dline[key] = med_unct1
key = '%s_filter2' % band
uncerts2, = np.nonzero((gal.mag2 < comp90[i][key] + binwidth/2) &
(gal.mag2 > comp90[i][key] - binwidth/2))
med_unct2 = np.median(gal.data.MAG2_ERR[uncerts2])
dlin |
(2, 3): 23.240228721514185,
(2, 1): -23.9264368052234,
(2, 4): 0.63465512968445115,
(4, 2): -4.5501817884252063,
(1, 0): 20.307078207040306,
(0, 3): np.nan,
(4, 0): -14.421880216023439,
(0, 1): np.nan,
(3, 3): -6.5845079821965991,
(4, 1): -19.329775838349192,
(3, 1): 18.084232469105203,
(4, 4): 24.644945052453025,
(0, 2): np.nan,
(2, 0): 5.6292750381105723,
(4, 3): 13.209596167161628,
(2, 2): -3.7469188310869228,
(3, 4): -17.381636024737336,
(1, 1): 13.827909766138866}
assert_almost_equal(x_diffs, real_x_diffs)
def test_get_speeds():
    """get_speeds of the brownian fixture matches precomputed values.

    The first entries are NaN -- presumably there is no previous point at
    the first time stamp to compute a speed from (confirm against the
    Trajectories implementation).
    """
    trajs = Trajectories(data.brownian_trajs_df())
    speeds = trajs.get_speeds().tolist()
    real_speeds = [np.nan,
                   np.nan,
                   np.nan,
                   np.nan,
                   np.nan,
                   857.99153458573994,
                   1596.9530747771976,
                   873.15267834726137,
                   1282.3088174598233,
                   408.98588960526808,
                   378.40023709328955,
                   1809.9895146014187,
                   917.93227668556324,
                   592.31881736181106,
                   0.48325048326444919,
                   0.39551116881922965,
                   798.29858694043128,
                   1085.3214310682606,
                   405.49164945495221,
                   550.37555144616226,
                   1406.707586739079,
                   1031.9444945962532,
                   1077.6619763794718,
                   1445.7789239945778,
                   739.66839622816326]
    assert_almost_equal(speeds, real_speeds)
def test_scale():
    """scale() multiplies the chosen coordinates, in and out of place."""
    trajs = Trajectories(data.brownian_trajs_df())
    doubled = trajs.scale(factors=[2., 2., 2.],
                          coords=['x', 'y', 'z'], inplace=False)
    # Out-of-place scaling leaves the original untouched.
    assert_array_almost_equal(doubled[['x', 'y', 'z']] / 2., trajs[['x', 'y', 'z']])
    # In-place scaling reproduces the same values.
    trajs = trajs.scale(factors=[2., 2., 2.],
                        coords=['x', 'y', 'z'], inplace=True)
    assert_array_almost_equal(doubled[['x', 'y', 'z']], trajs[['x', 'y', 'z']])
    # Mismatched factors/coords lengths must raise.
    assert_raises(ValueError, trajs.scale, factors=[2., 2., 2.], coords=['x', 'y'], inplace=False)
def test_project():
    """project() adds x_proj/y_proj columns for the axis between labels 0 and 1."""
    trajs = Trajectories(data.directed_motion_trajs_df())
    trajs.rename(columns={'true_label': 'new_label'}, inplace=True)
    trajs.relabel()
    # In-place projection.
    trajs.project([0, 1],
                  coords=['x', 'y'],
                  keep_first_time=False,
                  reference=None,
                  inplace=True,
                  progress=False)
    excepted = np.array([[ 0.27027431, 0. ],
                         [-0.27027431, 0. ],
                         [-0.25306519, 0.69683713],
                         [ 0.04633664, 0.31722648]])
    assert_array_almost_equal(excepted, trajs.loc[:,['x_proj', 'y_proj']].values[:4])
    # Out-of-place variant must give the same projected coordinates.
    trajs = trajs.project([0, 1],
                          coords=['x', 'y'],
                          keep_first_time=False,
                          reference=None,
                          inplace=False,
                          progress=False)
    assert_array_almost_equal(excepted, trajs.loc[:,['x_proj', 'y_proj']].values[:4])
    # Projection with four coordinates is rejected.
    assert_raises(ValueError, trajs.project, [0, 1], coords=['x', 'y', 'z', 't'])
def test_get_colors():
    """get_colors maps each label to a color (hex, hex+alpha, or rgba tuple)."""
    trajs = data.brownian_trajs_df()
    trajs = Trajectories(trajs)
    colors = trajs.get_colors()
    assert colors == {0: '#FF0000', 1: '#ADFF00', 2: '#00FFA9', 3: '#0408FF', 4: '#FF00AC'}
    # alpha is appended as a two-digit hex suffix.
    colors = trajs.get_colors(alpha=0.5)
    assert colors == {0: '#FF000080',
                      1: '#ADFF0080',
                      2: '#00FFA980',
                      3: '#0408FF80',
                      4: '#FF00AC80'}
    # rgba=True returns float tuples instead of hex strings.
    colors = trajs.get_colors(rgba=True)
    good_colors = {0: (1.0, 0.0, 0.0, 1.0),
                   1: (0.67977809154279767, 1.0, 0.0, 1.0),
                   2: (0.0, 1.0, 0.66360181783683614, 1.0),
                   3: (0.015440535661123769, 0.031618928677752463, 1.0, 1.0),
                   4: (1.0, 0.0, 0.67279469669175529, 1.0)}
    assert colors == good_colors
def test_get_longest_segments():
    """The single longest segment of the brownian fixture is label 4."""
    trajs = Trajectories(data.brownian_trajs_df())
    longest = trajs.get_longest_segments(1)
    assert longest == [4]
def test_get_shortest_segments():
    """The single shortest segment of the brownian fixture is label 0."""
    trajs = Trajectories(data.brownian_trajs_df())
    shortest = trajs.get_shortest_segments(1)
    assert shortest == [0]
def test_remove_segments():
    """Removing segment 1 in place leaves the other labels intact."""
    trajs = Trajectories(data.brownian_trajs_df())
    trajs.remove_segments(1, inplace=True)
    remaining = trajs.labels
    assert np.all(remaining == [0, 2, 3, 4])
def test_merge():
    """Merging two trajectory sets concatenates their label sets."""
    first = Trajectories(data.brownian_trajs_df())
    second = Trajectories(data.brownian_trajs_df())
    merged = first.merge(second)
    assert len(first.labels) + len(second.labels) == len(merged.labels)
def test_relabel():
    """relabel() rebuilds labels from a 'new_label' column, in and out of place."""
    trajs = Trajectories(data.brownian_trajs_df())
    trajs.columns = ['x', 'y', 'z', 'new_label', 't']
    trajs.relabel(inplace=True)
    new_values = [[1.933058243735795, -14.581064591435775, 11.603556633147544, 0.0],
                  [-12.862215173899491, -2.8611502446443238, -2.2738941196781424, 0.0],
                  [9.100887851132633, 2.837252570763561, 2.875753940450461, 0.0],
                  [-9.253860446235523, 11.345550876585719, 22.118203258275745, 0.0]]
    assert trajs.iloc[:4].values.tolist() == new_values
    # Same expectation for the out-of-place variant.
    trajs = Trajectories(data.brownian_trajs_df())
    trajs.columns = ['x', 'y', 'z', 'new_label', 't']
    trajs = trajs.relabel(inplace=False)
    new_values = [[1.933058243735795, -14.581064591435775, 11.603556633147544, 0.0],
                  [-12.862215173899491, -2.8611502446443238, -2.2738941196781424, 0.0],
                  [9.100887851132633, 2.837252570763561, 2.875753940450461, 0.0],
                  [-9.253860446235523, 11.345550876585719, 22.118203258275745, 0.0]]
    assert trajs.iloc[:4].values.tolist() == new_values
def test_relabel_fromzero():
    """relabel_fromzero restores consecutive labels after one is overwritten."""
    trajs = Trajectories(data.brownian_trajs_df())
    original_labels = trajs.labels
    idx = pd.IndexSlice
    trajs.loc[idx[:, 1], :] = 55
    relabeled = trajs.relabel_fromzero('label', inplace=False)
    assert np.all(relabeled.labels == original_labels)
    # NOTE(review): this stanza repeats the one above verbatim; it was
    # presumably meant to exercise inplace=True -- confirm.
    trajs.loc[idx[:, 1], :] = 55
    relabeled = trajs.relabel_fromzero('label', inplace=False)
    assert np.all(relabeled.labels == original_labels)
def test_remove_spots():
    """remove_spots drops the given (t_stamp, label) rows, single or list."""
    trajs = Trajectories(data.brownian_trajs_df())
    # A list of spots removes each one.
    new_trajs = trajs.remove_spots([(3, 2), (0, 0)], inplace=False)
    new_indexes = [(0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1), (1, 2),
                   (1, 3), (1, 4), (2, 0), (2, 1), (2, 2), (2, 3), (2, 4),
                   (3, 0), (3, 1), (3, 3), (3, 4), (4, 0), (4, 1), (4, 2),
                   (4, 3), (4, 4)]
    assert new_trajs.index.tolist() == new_indexes
    # A single tuple removes just that spot.
    new_trajs = trajs.remove_spots((0, 0), inplace=False)
    new_indexes = [(0, 1), (0, 2), (0, 3), (0, 4), (1, 0), (1, 1), (1, 2),
                   (1, 3), (1, 4), (2, 0), (2, 1), (2, 2), (2, 3), (2, 4),
                   (3, 0), (3, 1), (3, 2), (3, 3), (3, 4), (4, 0), (4, 1),
                   (4, 2), (4, 3), (4, 4)]
    assert new_trajs.index.tolist() == new_indexes
def test_merge_segments():
"""
"""
trajs = Trajectories(data.brownian_trajs_df())
trajs.reset_index(inplace=True)
trajs.loc[15, ['label']] = 88
trajs.loc[20, ['label']] = 88
trajs.set_index(['t_stamp', 'label'], inplace=True)
new_trajs = trajs.merge_segments([0, 88], inplace=False)
assert_array_almost_equal(trajs.values, new_trajs.values)
trajs = Trajectories(data.brownian_trajs_df())
good_trajs = trajs.copy()
trajs.r |
um=chunk.nansum, numel=nannumel),
partial(moment_agg, sum=np.nansum, ddof=ddof), axis=axis,
keepdims=keepdims, dtype=dt, split_every=split_every,
combine=partial(moment_combine, sum=np.nansum))
# Adopt chunk.nanvar's name/docstring when the chunk module provides it;
# silently skip otherwise.
with ignoring(AttributeError):
    nanvar = wraps(chunk.nanvar)(nanvar)
@wraps(chunk.std)
def std(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
    """Standard deviation: square root of ``a.var`` over the same axes."""
    out = sqrt(a.var(axis=axis, dtype=dtype, keepdims=keepdims, ddof=ddof,
                     split_every=split_every))
    # sqrt may widen the dtype; cast back when an explicit one was asked for.
    if dtype and dtype != out.dtype:
        out = out.astype(dtype)
    return out
def nanstd(a, axis=None, dtype=None, keepdims=False, ddof=0, split_every=None):
    """NaN-ignoring standard deviation: the square root of ``nanvar``."""
    out = sqrt(nanvar(a, axis=axis, dtype=dtype, keepdims=keepdims,
                      ddof=ddof, split_every=split_every))
    # Cast back when sqrt changed the requested dtype.
    if dtype and dtype != out.dtype:
        out = out.astype(dtype)
    return out
# Copy ``chunk.nanstd``'s metadata onto our implementation; skipped silently
# when the attribute is missing (hence the AttributeError guard).
with ignoring(AttributeError):
    nanstd = wraps(chunk.nanstd)(nanstd)
def vnorm(a, ord=None, axis=None, dtype=None, keepdims=False, split_every=None):
    """ Vector norm

    Dask counterpart of the vector norms of ``np.linalg.norm``:
    ``ord`` of None or 'fro' gives the 2-norm, +/-inf the max/min of the
    absolute values, and any other order the usual p-norm formula.

    See np.linalg.norm
    """
    # NOTE: ``max``/``min``/``sum`` below are this module's dask reductions
    # (they accept ``split_every``), not the builtins.
    if ord is None or ord == 'fro':
        ord = 2
    if ord == np.inf:
        return max(abs(a), axis=axis, keepdims=keepdims, split_every=split_every)
    elif ord == -np.inf:
        return min(abs(a), axis=axis, keepdims=keepdims, split_every=split_every)
    elif ord == 1:
        return sum(abs(a), axis=axis, dtype=dtype, keepdims=keepdims,
                   split_every=split_every)
    # For even orders |a|**ord == a**ord, so the abs can be skipped.
    elif ord % 2 == 0:
        return sum(a**ord, axis=axis, dtype=dtype, keepdims=keepdims,
                   split_every=split_every)**(1./ord)
    else:
        return sum(abs(a)**ord, axis=axis, dtype=dtype, keepdims=keepdims,
                   split_every=split_every)**(1./ord)
def _arg_combine(data, axis, argfunc, keepdims=False):
"""Merge intermediate results from ``arg_*`` functions"""
axis = None if len(axis) == data.ndim or data.ndim == 1 else axis[0]
vals = data['vals']
arg = data['arg']
if axis is None:
local_args = argfunc(vals, axis=axis, keepdims=keepdims)
vals = vals.ravel()[local_args]
arg = arg.ravel()[local_args]
else:
local_args = argfunc(vals, axis=axis)
inds = np.ogrid[tuple(map(slice, local_args.shape))]
inds.insert(axis, local_args)
vals = vals[inds]
arg = arg[inds]
if keepdims:
vals = np.expand_dims(vals, axis)
arg = np.expand_dims(arg, axis)
return arg, vals
def arg_chunk(func, argfunc, x, axis, offset_info):
    """Per-chunk step of an arg-reduction: compute this block's winning value
    and its *global* index, packed together in one structured array."""
    flat = len(axis) == x.ndim or x.ndim == 1
    arg_axis = None if flat else axis[0]
    vals = func(x, axis=arg_axis, keepdims=True)
    arg = argfunc(x, axis=arg_axis, keepdims=True)
    if arg_axis is None:
        # Translate the block-local flat index into a flat index into the
        # whole array, using the block's nd offset and the global shape.
        offset, total_shape = offset_info
        local_nd = np.unravel_index(arg.ravel()[0], x.shape)
        global_nd = tuple(o + i for (o, i) in zip(offset, local_nd))
        arg[:] = np.ravel_multi_index(global_nd, total_shape)
    else:
        # Single-axis reduction: shift by this block's offset along that axis.
        arg += offset_info

    packed = np.empty(shape=vals.shape, dtype=[('vals', vals.dtype),
                                               ('arg', arg.dtype)])
    packed['vals'] = vals
    packed['arg'] = arg
    return packed
def arg_combine(func, argfunc, data, axis=None, **kwargs):
    """Intermediate combine step: pick the winners across blocks, keeping the
    reduced axes (size 1) so further tree-combining can proceed."""
    arg, vals = _arg_combine(data, axis, argfunc, keepdims=True)
    out = np.empty(shape=vals.shape, dtype=[('vals', vals.dtype),
                                            ('arg', arg.dtype)])
    out['vals'] = vals
    out['arg'] = arg
    return out
def arg_agg(func, argfunc, data, axis=None, **kwargs):
    """Final aggregation step: return only the winning indices."""
    arg, _ = _arg_combine(data, axis, argfunc, keepdims=False)
    return arg
def nanarg_agg(func, argfunc, data, axis=None, **kwargs):
    """Final aggregation for the nan-variants: an all-NaN slice has no valid
    winner, so raise instead of returning an index."""
    arg, vals = _arg_combine(data, axis, argfunc, keepdims=False)
    if np.isnan(vals).any():
        raise ValueError("All NaN slice encountered")
    return arg
def arg_reduction(x, chunk, combine, agg, axis=None, split_every=None):
    """Generic function for argreduction.

    Parameters
    ----------
    x : Array
    chunk : callable
        Partialed ``arg_chunk``.
    combine : callable
        Partialed ``arg_combine``.
    agg : callable
        Partialed ``arg_agg``.
    axis : int, optional
    split_every : int or dict, optional
    """
    if axis is None:
        # Full reduction over every axis.
        axis = tuple(range(x.ndim))
        ravel = True
    elif isinstance(axis, int):
        if axis < 0:
            axis += x.ndim
        if axis < 0 or axis >= x.ndim:
            raise ValueError("axis entry is out of bounds")
        axis = (axis,)
        # Reducing a 1-d array along its only axis is also a full reduction.
        ravel = x.ndim == 1
    else:
        raise TypeError("axis must be either `None` or int, "
                        "got '{0}'".format(axis))

    # Map chunk across all blocks
    name = 'arg-reduce-chunk-{0}'.format(tokenize(chunk, axis))
    old = x.name
    keys = list(product(*map(range, x.numblocks)))
    # Each block's starting offset along every axis (cumulative chunk sizes).
    offsets = list(product(*(accumulate(operator.add, bd[:-1], 0)
                             for bd in x.chunks)))
    if ravel:
        # Full reduction: each chunk needs its nd offset plus the global
        # shape so it can produce a flat index into the whole array.
        offset_info = zip(offsets, repeat(x.shape))
    else:
        # Single-axis reduction: only the offset along that axis matters.
        offset_info = pluck(axis[0], offsets)

    chunks = tuple((1,)*len(c) if i in axis else c for (i, c)
                   in enumerate(x.chunks))
    dsk = dict(((name,) + k, (chunk, (old,) + k, axis, off)) for (k, off)
               in zip(keys, offset_info))
    tmp = Array(merge(dsk, x.dask), name, chunks)
    # Tree-combine the per-block winners; the result is int64 indices.
    return _tree_reduce(tmp, agg, axis, False, np.int64, split_every, combine)
def make_arg_reduction(func, argfunc, is_nan_func=False):
    """Create a argreduction callable.

    Parameters
    ----------
    func : callable
        The reduction (e.g. ``min``)
    argfunc : callable
        The argreduction (e.g. ``argmin``)
    is_nan_func : bool, optional
        When True, aggregate with ``nanarg_agg``, which raises ``ValueError``
        on an all-NaN slice instead of returning an index.
    """
    chunk = partial(arg_chunk, func, argfunc)
    combine = partial(arg_combine, func, argfunc)
    if is_nan_func:
        agg = partial(nanarg_agg, func, argfunc)
    else:
        agg = partial(arg_agg, func, argfunc)

    @wraps(argfunc)
    def _(x, axis=None, split_every=None):
        return arg_reduction(x, chunk, combine, agg, axis, split_every)
    return _
def _nanargmin(x, axis, **kwargs):
    """``nanargmin`` that tolerates all-NaN slices by substituting +inf."""
    try:
        return chunk.nanargmin(x, axis, **kwargs)
    except ValueError:
        # All-NaN slice: fill with +inf so a winner always exists; the final
        # aggregation step re-raises if the winning value is still NaN.
        filled = np.where(np.isnan(x), np.inf, x)
        return chunk.nanargmin(filled, axis, **kwargs)
def _nanargmax(x, axis, **kwargs):
    """``nanargmax`` that tolerates all-NaN slices by substituting -inf."""
    try:
        return chunk.nanargmax(x, axis, **kwargs)
    except ValueError:
        # All-NaN slice: fill with -inf so a winner always exists; the final
        # aggregation step re-raises if the winning value is still NaN.
        filled = np.where(np.isnan(x), -np.inf, x)
        return chunk.nanargmax(filled, axis, **kwargs)
# Public arg-reduction entry points.  The nan variants raise ValueError on
# an all-NaN slice (see nanarg_agg).
argmin = make_arg_reduction(chunk.min, chunk.argmin)
argmax = make_arg_reduction(chunk.max, chunk.argmax)
nanargmin = make_arg_reduction(chunk.nanmin, _nanargmin, True)
nanargmax = make_arg_reduction(chunk.nanmax, _nanargmax, True)
def cumreduction(func, binop, ident, x, axis, dtype=None):
""" Generic function for cumulative reduction
Parameters
----------
func: callable
Cumulative function like np.cumsum or np.cumprod
binop: callable
Associated binary operator like ``np.cumsum->add`` or ``np.cumprod->mul``
ident: Number
Associated identity like ``np.cumsum->0`` or ``np.cumprod->1``
x: dask Array
axis: int
dtype: dtype
Returns
-------
dask array
See also
--------
cumsum
cumprod
"""
if dtype is None:
dtype = func(np.empty((0,), dtype=x.dtype)).dtype
assert isinstance(axis, int)
m = x.map_blocks(func, axis=axis, dtype=dtype)
name = '%s-axis=%d-%s' % (func.__name__, axis, tokenize(x, dtype))
n = x.numblocks[axis]
full = slice(None, None, None)
slc = (full,) * axis + (slice(-1, None),) + (full,) * (x.ndim - axis - 1)
indices = list(product(*[range(nb) if i != axis else [0]
for i, nb in enumerate(x.numblocks)]))
dsk = dict()
for ind in indices:
shape = tuple(x.chunks[i][ii] if i != axis else 1
for i, ii in enumerate(ind))
dsk[(name, 'extra') + ind] = (np.full, shape, ident, m.dtype)
dsk[(name,) + ind] = (m.name,) + ind
for i in range(1 |
# Generated from STIXPattern.g4 by ANTLR 4.9.2
from antlr4 import *
if __name__ is not None and "." in __name__:
from .STIXPatternParser import STIXPatternParser
else:
from STIXPatternParser import STIXPatternParser
# This class defines a complete generic visitor for a parse tree produced by STIXPatternParser.
# This class defines a complete generic visitor for a parse tree produced by STIXPatternParser.
class STIXPatternVisitor(ParseTreeVisitor):
    """Generic ANTLR visitor for parse trees produced by STIXPatternParser.

    Auto-generated from STIXPattern.g4: every method simply visits the
    node's children.  Subclass and override the methods for the rules you
    care about; do not edit this file by hand.
    """

    # Visit a parse tree produced by STIXPatternParser#pattern.
    def visitPattern(self, ctx:STIXPatternParser.PatternContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#observationExpressions.
    def visitObservationExpressions(self, ctx:STIXPatternParser.ObservationExpressionsContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#observationExpressionOr.
    def visitObservationExpressionOr(self, ctx:STIXPatternParser.ObservationExpressionOrContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#observationExpressionAnd.
    def visitObservationExpressionAnd(self, ctx:STIXPatternParser.ObservationExpressionAndContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#observationExpressionRepeated.
    def visitObservationExpressionRepeated(self, ctx:STIXPatternParser.ObservationExpressionRepeatedContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#observationExpressionSimple.
    def visitObservationExpressionSimple(self, ctx:STIXPatternParser.ObservationExpressionSimpleContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#observationExpressionCompound.
    def visitObservationExpressionCompound(self, ctx:STIXPatternParser.ObservationExpressionCompoundContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#observationExpressionWithin.
    def visitObservationExpressionWithin(self, ctx:STIXPatternParser.ObservationExpressionWithinContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#observationExpressionStartStop.
    def visitObservationExpressionStartStop(self, ctx:STIXPatternParser.ObservationExpressionStartStopContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#comparisonExpression.
    def visitComparisonExpression(self, ctx:STIXPatternParser.ComparisonExpressionContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#comparisonExpressionAnd.
    def visitComparisonExpressionAnd(self, ctx:STIXPatternParser.ComparisonExpressionAndContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#propTestEqual.
    def visitPropTestEqual(self, ctx:STIXPatternParser.PropTestEqualContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#propTestOrder.
    def visitPropTestOrder(self, ctx:STIXPatternParser.PropTestOrderContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#propTestSet.
    def visitPropTestSet(self, ctx:STIXPatternParser.PropTestSetContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#propTestLike.
    def visitPropTestLike(self, ctx:STIXPatternParser.PropTestLikeContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#propTestRegex.
    def visitPropTestRegex(self, ctx:STIXPatternParser.PropTestRegexContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#propTestIsSubset.
    def visitPropTestIsSubset(self, ctx:STIXPatternParser.PropTestIsSubsetContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#propTestIsSuperset.
    def visitPropTestIsSuperset(self, ctx:STIXPatternParser.PropTestIsSupersetContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#propTestParen.
    def visitPropTestParen(self, ctx:STIXPatternParser.PropTestParenContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#propTestExists.
    def visitPropTestExists(self, ctx:STIXPatternParser.PropTestExistsContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#startStopQualifier.
    def visitStartStopQualifier(self, ctx:STIXPatternParser.StartStopQualifierContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#withinQualifier.
    def visitWithinQualifier(self, ctx:STIXPatternParser.WithinQualifierContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#repeatedQualifier.
    def visitRepeatedQualifier(self, ctx:STIXPatternParser.RepeatedQualifierContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#objectPath.
    def visitObjectPath(self, ctx:STIXPatternParser.ObjectPathContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#objectType.
    def visitObjectType(self, ctx:STIXPatternParser.ObjectTypeContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#firstPathComponent.
    def visitFirstPathComponent(self, ctx:STIXPatternParser.FirstPathComponentContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#indexPathStep.
    def visitIndexPathStep(self, ctx:STIXPatternParser.IndexPathStepContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#pathStep.
    def visitPathStep(self, ctx:STIXPatternParser.PathStepContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#keyPathStep.
    def visitKeyPathStep(self, ctx:STIXPatternParser.KeyPathStepContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#setLiteral.
    def visitSetLiteral(self, ctx:STIXPatternParser.SetLiteralContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#primitiveLiteral.
    def visitPrimitiveLiteral(self, ctx:STIXPatternParser.PrimitiveLiteralContext):
        return self.visitChildren(ctx)

    # Visit a parse tree produced by STIXPatternParser#orderableLiteral.
    def visitOrderableLiteral(self, ctx:STIXPatternParser.OrderableLiteralContext):
        return self.visitChildren(ctx)
# The parser class was only needed while the annotations above were being
# evaluated at class-definition time; drop the module-level reference now.
del STIXPatternParser
def Woody():
    """Print Woody's catchphrase so it's easy to see the script actually ran."""
    # ``print(...)`` with a single argument behaves the same on Python 2 and 3;
    # the original Python 2 print *statement* is a SyntaxError on Python 3.
    print("Reach for the sky but don't burn your wings!")
time
import sys
from decimal import Decimal
from HTMLParser import HTMLParseError
from time import strptime
import mylar
def GCDScraper(ComicName, ComicYear, Total, ComicID, quickmatch=None):
    """Search comics.org (the Grand Comics Database) for a series matching
    name / start year / issue count.

    Falls back to several name variations (number words, leading 'the',
    punctuation) by recursing.  Returns 'No Match' when nothing matches;
    with ``quickmatch`` set, returns only 'Match'/'No Match'; otherwise
    hands the matched series URL to ``GCDdetails`` for detail scraping.
    """
    NOWyr = datetime.date.today().year
    if datetime.date.today().month == 12:
        # December releases can already carry next year's cover dates.
        NOWyr = NOWyr + 1
        logger.fdebug("We're in December, incremented search Year to increase search results: " + str(NOWyr))
    comicnm = ComicName.encode('utf-8').strip()
    comicyr = ComicYear
    comicis = Total
    comicid = ComicID
    #print ( "comicname: " + str(comicnm) )
    #print ( "comicyear: " + str(comicyr) )
    #print ( "comichave: " + str(comicis) )
    #print ( "comicid: " + str(comicid) )
    # Encode the series name for the GCD advanced-search query string.
    comicnm_1 = re.sub('\+', '%2B', comicnm)
    comicnm = re.sub(' ', '+', comicnm_1)
    # NOTE(review): ``input`` shadows the builtin of the same name.
    input = 'http://www.comics.org/search/advanced/process/?target=series&method=icontains&logic=False&order2=date&order3=&start_date=' + str(comicyr) + '-01-01&end_date=' + str(NOWyr) + '-12-31&series=' + str(comicnm) + '&is_indexed=None'
    response = urllib2.urlopen (input)
    soup = BeautifulSoup (response)
    # GCD result rows alternate between 'listing_even' and 'listing_odd'.
    cnt1 = len(soup.findAll("tr", {"class": "listing_even"}))
    cnt2 = len(soup.findAll("tr", {"class": "listing_odd"}))
    cnt = int(cnt1 + cnt2)
    #print (str(cnt) + " results")
    resultName = []
    resultID = []
    resultYear = []
    resultIssues = []
    resultURL = None
    n_odd = -1
    n_even = -1
    n = 0
    while (n < cnt):
        # Re-interleave the even/odd row lists back into display order.
        if n%2==0:
            n_even+=1
            resultp = soup.findAll("tr", {"class": "listing_even"})[n_even]
        else:
            n_odd+=1
            resultp = soup.findAll("tr", {"class": "listing_odd"})[n_odd]
        rtp = resultp('a')[1]
        resultName.append(helpers.cleanName(rtp.findNext(text=True)))
        #print ( "Comic Name: " + str(resultName[n]) )
        fip = resultp('a', href=True)[1]
        resultID.append(fip['href'])
        #print ( "ID: " + str(resultID[n]) )
        subtxt3 = resultp('td')[3]
        resultYear.append(subtxt3.findNext(text=True))
        resultYear[n] = resultYear[n].replace(' ', '')
        subtxt4 = resultp('td')[4]
        resultIssues.append(helpers.cleanName(subtxt4.findNext(text=True)))
        # Keep only the text before the word 'issue' (e.g. "12 issues" -> "12").
        resiss = resultIssues[n].find('issue')
        resiss = int(resiss)
        resultIssues[n] = resultIssues[n].replace('', '')[:resiss]
        resultIssues[n] = resultIssues[n].replace(' ', '')
        #print ( "Year: " + str(resultYear[n]) )
        #print ( "Issues: " + str(resultIssues[n]) )
        # Compare names case-folded with punctuation and whitespace stripped.
        CleanComicName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', comicnm)
        CleanComicName = re.sub(' ', '', CleanComicName).lower()
        CleanResultName = re.sub('[\,\.\:\;\'\[\]\(\)\!\@\#\$\%\^\&\*\-\_\+\=\?\/]', '', resultName[n])
        CleanResultName = re.sub(' ', '', CleanResultName).lower()
        #print ("CleanComicName: " + str(CleanComicName))
        #print ("CleanResultName: " + str(CleanResultName))
        # NOTE(review): the [3:] comparison presumably skips a leading 'the'
        # on the GCD side -- confirm.
        if CleanResultName == CleanComicName or CleanResultName[3:] == CleanComicName:
            #if resultName[n].lower() == helpers.cleanName(str(ComicName)).lower():
            #print ("n:" + str(n) + "...matched by name to Mylar!")
            #this has been seen in a few instances already, so trying to adjust.
            #when the series year is 2011, in gcd it might be 2012 due to publication
            #dates overlapping between Dec/11 and Jan/12. Let's accept a match with a
            #1 year grace space, and then pull in the first issue to see the actual pub
            # date and if coincides with the other date..match it.
            if resultYear[n] == ComicYear or resultYear[n] == str(int(ComicYear) +1):
                #print ("n:" + str(n) + "...matched by year to Mylar!")
                #print ( "Year: " + str(resultYear[n]) )
                #Occasionally there are discrepancies in comic count between
                #GCD and CV. 99% it's CV not updating to the newest issue as fast
                #as GCD does. Therefore, let's increase the CV count by 1 to get it
                #to match, any more variation could cause incorrect matching.
                #ie. witchblade on GCD says 159 issues, CV states 161.
                if int(resultIssues[n]) == int(Total) or int(resultIssues[n]) == int(Total) +1 or (int(resultIssues[n]) +1) == int(Total):
                    #print ("initial issue match..continuing.")
                    # Record which side is one issue ahead so callers can adjust.
                    if int(resultIssues[n]) == int(Total) +1:
                        issvariation = "cv"
                    elif int(resultIssues[n]) +1 == int(Total):
                        issvariation = "gcd"
                    else:
                        issvariation = "no"
                    #print ("n:" + str(n) + "...matched by issues to Mylar!")
                    #print ("complete match!...proceeding")
                    TotalIssues = resultIssues[n]
                    resultURL = str(resultID[n])
                    rptxt = resultp('td')[6]
                    resultPublished = rptxt.findNext(text=True)
                    #print ("Series Published: " + str(resultPublished))
                    break
        n+=1
    # it's possible that comicvine would return a comic name incorrectly, or gcd
    # has the wrong title and won't match 100%...
    # (ie. The Flash-2011 on comicvine is Flash-2011 on gcd)
    # this section is to account for variations in spelling, punctuation, etc/
    basnumbs = {'one': 1, 'two': 2, 'three': 3, 'four': 4, 'five': 5, 'six': 6, 'seven': 7, 'eight': 8, 'nine': 9, 'ten': 10, 'eleven': 11, 'twelve': 12}
    if resultURL is None:
        #search for number as text, and change to numeric
        for numbs in basnumbs:
            #print ("numbs:" + str(numbs))
            if numbs in ComicName.lower():
                numconv = basnumbs[numbs]
                #print ("numconv: " + str(numconv))
                ComicNm = re.sub(str(numbs), str(numconv), ComicName.lower())
                #print ("comicname-reVISED:" + str(ComicNm))
                # NOTE(review): the recursive retries below drop ``quickmatch``,
                # and the ``break`` after this return is unreachable.
                return GCDScraper(ComicNm, ComicYear, Total, ComicID)
                break
        if ComicName.lower().startswith('the '):
            ComicName = ComicName[4:]
            return GCDScraper(ComicName, ComicYear, Total, ComicID)
        if ':' in ComicName:
            ComicName = re.sub(':', '', ComicName)
            return GCDScraper(ComicName, ComicYear, Total, ComicID)
        if '-' in ComicName:
            ComicName = re.sub('-', ' ', ComicName)
            return GCDScraper(ComicName, ComicYear, Total, ComicID)
        if 'and' in ComicName.lower():
            ComicName = ComicName.replace('and', '&')
            return GCDScraper(ComicName, ComicYear, Total, ComicID)
        if not quickmatch: return 'No Match'
    #vari_loop = 0
    if quickmatch == "yes":
        if resultURL is None: return 'No Match'
        else: return 'Match'
    return GCDdetails(comseries=None, resultURL=resultURL, vari_loop=0, ComicID=ComicID, TotalIssues=TotalIssues, issvariation=issvariation, resultPublished=resultPublished)
def GCDdetails(comseries, resultURL, vari_loop, ComicID, TotalIssues, issvariation, resultPublished):
gcdinfo = {}
gcdchoice = []
gcount = 0
i = 0
# datemonth = {'one':1,'two':2,'three':3,'four':4,'five':5,'six':6,'seven':7,'eight':8,'nine':9,'ten':10,'eleven':$
# #search for number as text, and change to numeric
# for numbs in basnumbs:
# #print ("numbs:" + str(numbs))
# if numbs in ComicName.lower():
# numconv = basnumbs[numbs]
# #print ("numconv: " + str(numconv))
if vari_loop > 1:
resultPublished = "Unknown"
if vari_loop == 99: vari_loop = 1
while (i <= vari_loop):
if vari_loop > 0:
try:
boong = comseries['comseries'][i]
except IndexError:
break
resultURL = boong['comseriesID']
ComicID = boong['comicid']
TotalIssues+= int(boong['comseriesIssues'])
else:
resultURL = resultURL
# if we're here - it means it's a mismatched name.
# let's pull down the public |
ssion
from flexget.logger import console
log = logging.getLogger('web_server')

# Route registered via register_home() that '/' redirects to.
_home = None
# path -> WSGI application mappings registered by plugins via register_app().
_app_register = {}
_default_app = Flask(__name__)

# Shadow the ``random`` module with a crypto-grade SystemRandom instance;
# all later ``random.*`` calls in this module go through it.
random = random.SystemRandom()
# JSON schema for the 'web_server' config key: either a bare boolean, or an
# object selecting the bind address and port.
web_config_schema = {
    'oneOf': [
        {'type': 'boolean'},
        {
            'type': 'object',
            'properties': {
                'bind': {'type': 'string', 'format': 'ipv4', 'default': '0.0.0.0'},
                'port': {'type': 'integer', 'default': 3539},
            },
            'additionalProperties': False
        }
    ]
}
def generate_key():
    """ Generate key for use to authentication """
    # 128 random bits from the module-level SystemRandom, hashed with SHA-224
    # for a fixed-length hex token.
    return unicode(hashlib.sha224(str(random.getrandbits(128))).hexdigest())
def get_random_string(length=12, allowed_chars='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
    """Return a securely generated random string.

    The default length of 12 with the a-z, A-Z, 0-9 character set gives
    roughly a 71-bit value (log2(62^12) ~= 71 bits).  Adapted from the
    django.utils.crypto module.
    """
    picks = [random.choice(allowed_chars) for _ in range(length)]
    return ''.join(picks)
@with_session
def get_secret(session=None):
    """Generate a secret key for flask applications and store it in the database.

    Returns the existing stored secret when one is present; otherwise creates,
    persists and returns a new 50-character random value.
    """
    # (A stray ``pass`` previously preceded this docstring, demoting it to a
    # dead string-expression statement.)
    web_secret = session.query(WebSecret).first()
    if not web_secret:
        web_secret = WebSecret(id=1, value=get_random_string(50, 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'))
        session.add(web_secret)
        session.commit()
    return web_secret.value
class User(Base, UserMixin):
    """ User class available for flask apps to handle authentication using flask_login """
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    # Login name; also serves as the flask_login identifier (see get_id).
    name = Column(Unicode(50), unique=True)
    # API token, auto-generated on insert.
    token = Column(Unicode, default=generate_key)
    # NOTE(review): stored as-is -- confirm hashing happens elsewhere.
    password = Column(Unicode)

    def __repr__(self):
        return '<User %r>' % self.name

    def get_id(self):
        # flask_login identifies users by name rather than the numeric id.
        return self.name
class WebSecret(Base):
    """ Store flask secret in the database """
    __tablename__ = 'secret'
    id = Column(Unicode, primary_key=True)
    # The secret key material itself (see get_secret()).
    value = Column(Unicode)
@event('config.register')
def register_config():
    """Register the 'web_server' root config key with its schema."""
    register_config_key('web_server', web_config_schema)
def register_app(path, application):
    """Mount a WSGI/Flask *application* at *path* on the web server.

    Raises
    ------
    ValueError
        If an application is already registered at *path*.
    """
    if path in _app_register:
        # Interpolate the offending path into the message (the original left
        # the '%s' placeholder unformatted).
        raise ValueError('path %s already registered' % path)
    _app_register[path] = application
def register_home(route):
    """Registers UI home page"""
    # Stored module-globally; '/' redirects here (see start_page).
    global _home
    _home = route
@_default_app.route('/')
def start_page():
    """ Redirect user to registered UI home """
    if not _home:
        # No UI plugin has registered a home page.
        abort(404)
    return redirect(_home)
@event('manager.daemon.started', -255)  # Low priority so plugins can register apps
@with_session
def setup_server(manager, session=None):
    """ Sets up and starts/restarts the web service. """
    # The web server only runs in daemon mode.
    if not manager.is_daemon:
        return

    web_server_config = manager.config.get('web_server')
    if not web_server_config:
        return

    # NOTE(review): the schema also allows a bare boolean here; presumably
    # schema defaults are filled in before this runs -- confirm.
    web_server = WebServer(
        bind=web_server_config['bind'],
        port=web_server_config['port'],
    )

    _default_app.secret_key = get_secret()

    # Create default flexget user
    if session.query(User).count() == 0:
        session.add(User(name="flexget", password="flexget"))
        session.commit()

    # Restart if already running so config changes take effect.
    if web_server.is_alive():
        web_server.stop()

    # Only start when at least one plugin registered something to serve.
    if _app_register:
        web_server.start()
@event('manager.shutdown_requested')
def stop_server(manager):
    """ Stop the web server when daemon shutdown is requested. """
    if not manager.is_daemon:
        return
    # WebServer is a @singleton, so this returns the running instance.
    web_server = WebServer()
    if web_server.is_alive():
        web_server.stop()
@singleton
class WebServer(threading.Thread):
    """Background thread serving all registered WSGI apps through CherryPy."""
    # We use a regular list for periodic jobs, so you must hold this lock while using it
    triggers_lock = threading.Lock()

    def __init__(self, bind='0.0.0.0', port=5050):
        threading.Thread.__init__(self, name='web_server')
        self.bind = str(bind)  # String to remove unicode warning from cherrypy startup
        self.port = port
        self.server = None  # set in _start_server()

    def start(self):
        # If we have already started and stopped a thread, we need to reinitialize it to create a new one
        if not self.is_alive():
            self.__init__(bind=self.bind, port=self.port)
        threading.Thread.start(self)

    def _start_server(self):
        from cherrypy import wsgiserver
        # Dispatch '/' to the default app plus everything plugins registered.
        apps = {'/': _default_app}
        for path, registered_app in _app_register.iteritems():
            apps[path] = registered_app

        d = wsgiserver.WSGIPathInfoDispatcher(apps)
        self.server = wsgiserver.CherryPyWSGIServer((self.bind, self.port), d)

        # Resolve a printable host name for the log message when bound to all
        # interfaces; fall back to loopback when resolution fails.
        try:
            host = self.bind if self.bind != "0.0.0.0" else socket.gethostbyname(socket.gethostname())
        except socket.gaierror:
            host = '127.0.0.1'

        log.info('Web interface available at http://%s:%s' % (host, self.port))
        # Presumably blocks in the CherryPy serve loop until stop() -- this
        # runs on the thread via run().
        self.server.start()

    def run(self):
        self._start_server()

    def stop(self):
        log.info('Shutting down web server')
        if self.server:
            self.server.stop()
@with_session
def do_cli(manager, options, session=None):
    """CLI handler for the 'users' command: list/add/delete/passwd/gentoken."""
    try:
        if hasattr(options, 'user'):
            # Usernames are stored and compared lowercase.
            options.user = options.user.lower()

        if options.action == 'list':
            users = session.query(User).all()
            if users:
                # Pad the name column to the longest username.
                max_width = len(max([user.name for user in users], key=len)) + 4
                console('_' * (max_width + 56 + 9))
                console('| %-*s | %-*s |' % (max_width, 'Username', 56, 'API Token'))
            if users:
                for user in users:
                    console('| %-*s | %-*s |' % (max_width, user.name, 56, user.token))
            else:
                console('No users found')

        if options.action == 'add':
            exists = session.query(User).filter(User.name == options.user).first()
            if exists:
                console('User %s already exists' % options.user)
                return
            user = User(name=options.user, password=options.password)
            session.add(user)
            session.commit()
            console('Added %s to the database with generated API Token: %s' % (user.name, user.token))

        if options.action == 'delete':
            user = session.query(User).filter(User.name == options.user).first()
            if not user:
                console('User %s does not exist' % options.user)
                return
            session.delete(user)
            session.commit()
            console('Deleted user %s' % options.user)

        if options.action == 'passwd':
            user = session.query(User).filter(User.name == options.user).first()
            if not user:
                console('User %s does not exist' % options.user)
                return
            user.password = options.password
            session.commit()
            console('Updated password for user %s' % options.user)

        if options.action == 'gentoken':
            user = session.query(User).filter(User.name == options.user).first()
            if not user:
                console('User %s does not exist' % options.user)
                return
            user.token = generate_key()
            session.commit()
            console('Generated new token for user %s' % user.name)
            console('Token %s' % user.token)
    finally:
        # NOTE(review): @with_session already manages this session's
        # lifecycle; the explicit close here looks redundant -- confirm.
        session.close()
@event('options.register')
def register_parser_arguments():
parser = options.register_command('users', do_cli, help='Manage users providing access to the web server')
subparsers = parser.add_subparsers(dest='action', metavar='<action>')
subparsers.add_parser('list', help='List users')
add_parser = subparsers.add_parser('add', help='add a new user')
add_parser.add_argument('user', metavar='<username>', help='Users login')
add_parser.add_argument('password', metavar='<password>', help='Users password')
del_parser = subparsers.add_parser('delete', help='delete a new user')
del_parser.add_argument('user', metavar='<username>', help='Logi |
s) to contain current inquiries to
self._restriction = None
self._also_restriction = None
self._subset = None
if isinstance(host_list, basestring):
if "," in host_list:
host_list = host_list.split(",")
host_list = [ h for h in host_list if h and h.strip() ]
if host_list is None:
self.parser = None
elif isinstance(host_list, list):
self.parser = None
all = Group('all')
self.groups = [ all ]
ipv6_re = re.compile('\[([a-f:A-F0-9]*[%[0-z]+]?)\](?::(\d+))?')
for x in host_list:
m = ipv6_re.match(x)
if m:
all.add_host(Host(m.groups()[0], m.groups()[1]))
else:
if ":" in x:
tokens = x.rsplit(":", 1)
# if there is ':' in the address, then this is a ipv6
if ':' in tokens[0]:
all.add_host(Host(x))
else:
all.add_host(Host(tokens[0], tokens[1]))
else:
all.add_host(Host(x))
elif os.path.exists(host_list):
if os.path.isdir(host_list):
# Ensure basedir is inside the directory
self.host_list = os.path.join(self.host_list, "")
self.parser = InventoryDirectory(filename=host_list)
self.groups = self.parser.groups.values()
elif utils.is_executable(host_list):
self.parser = InventoryScript(filename=host_list)
self.groups = self.parser.groups.values()
else:
self.parser = InventoryParser(filename=host_list)
self.groups = self.parser.groups.values()
utils.plugins.vars_loader.add_directory(self.basedir(), with_subdir=True)
else:
raise errors.AnsibleError("Unable to find an inventory file, specify one with -i ?")
self._vars_plugins = [ x for x in utils.plugins.vars_loader.all(self) ]
def _match(self, str, pattern_str):
if pattern_str.startswith('~'):
return re.search(pattern_str[1:], str)
else:
return fnmatch.fnmatch(str, pattern_str)
def get_hosts(self, pattern="all"):
    """
    find all host names matching a pattern string, taking into account any inventory restrictions or
    applied subsets.
    """
    # process patterns
    if isinstance(pattern, list):
        pattern = ';'.join(pattern)
    # Both ';' and ':' are accepted as pattern separators.
    patterns = pattern.replace(";",":").split(":")
    hosts = self._get_hosts(patterns)

    # exclude hosts not in a subset, if defined
    if self._subset:
        subset = self._get_hosts(self._subset)
        new_hosts = []
        for h in hosts:
            # Preserve order and uniqueness while intersecting with the subset.
            if h in subset and h not in new_hosts:
                new_hosts.append(h)
        hosts = new_hosts

    # exclude hosts mentioned in any restriction (ex: failed hosts)
    if self._restriction is not None:
        hosts = [ h for h in hosts if h.name in self._restriction ]
    if self._also_restriction is not None:
        hosts = [ h for h in hosts if h.name in self._also_restriction ]

    return hosts
def _get_hosts(self, patterns):
    """
    finds hosts that match a list of patterns. Handles negative
    matches as well as intersection matches.
    """
    # Host specifiers should be sorted to ensure consistent behavior
    pattern_regular = []
    pattern_intersection = []
    pattern_exclude = []
    for p in patterns:
        if p.startswith("!"):
            pattern_exclude.append(p)
        elif p.startswith("&"):
            pattern_intersection.append(p)
        else:
            pattern_regular.append(p)

    # if no regular pattern was given, hence only exclude and/or intersection
    # make that magically work
    if pattern_regular == []:
        pattern_regular = ['all']

    # when applying the host selectors, run those without the "&" or "!"
    # first, then the &s, then the !s.
    patterns = pattern_regular + pattern_intersection + pattern_exclude

    hosts = []
    for p in patterns:
        that = self.__get_hosts(p)
        if p.startswith("!"):
            # Exclusion: drop hosts matched by this pattern.
            hosts = [ h for h in hosts if h not in that ]
        elif p.startswith("&"):
            # Intersection: keep only hosts also matched by this pattern.
            hosts = [ h for h in hosts if h in that ]
        else:
            # Union: append without introducing duplicates.
            for h in that:
                if h not in hosts:
                    hosts.append(h)
    return hosts
def __get_hosts(self, pattern):
    """
    finds hosts that postively match a particular pattern. Does not
    take into account negative matches.
    """
    # Memoized per pattern; the cache is cleared via clear_pattern_cache().
    if pattern in self._pattern_cache:
        return self._pattern_cache[pattern]

    (name, enumeration_details) = self._enumeration_info(pattern)
    hpat = self._hosts_in_unenumerated_pattern(name)
    result = self._apply_ranges(pattern, hpat)
    self._pattern_cache[pattern] = result
    return result
def _enumeration_info(self, pattern):
"""
returns (pattern, limits) taking a regular pattern and finding out
which | parts of it correspond to start/stop offsets. limits is
a tuple of (start, stop) or None
"""
if not "[" in pattern or pattern.startswith('~'):
return (pattern, None) |
(first, rest) = pattern.split("[")
rest = rest.replace("]","")
try:
# support selectors like webservers[0]
x = int(rest)
return (first, (x,x))
except:
pass
if "-" in rest:
(left, right) = rest.split("-",1)
return (first, (left, right))
elif ":" in rest:
(left, right) = rest.split(":",1)
return (first, (left, right))
else:
return (first, (rest, rest))
def _apply_ranges(self, pat, hosts):
    """
    given a pattern like foo, that matches hosts, return all of hosts
    given a pattern like foo[0:5], where foo matches hosts, return the first 6 hosts
    """
    (loose_pattern, limits) = self._enumeration_info(pat)
    if not limits:
        return hosts

    (left, right) = limits
    if left == '':
        left = 0
    if right == '':
        # Open-ended range like foo[2:] means "through the end".  (The
        # previous coercion of '' to 0 produced hosts[2:0] -- always empty.)
        right = None
    left = int(left)
    if right is not None:
        right = int(right)
    if left != right:
        return hosts[left:right]
    else:
        # Single-index selector like foo[3].
        return [ hosts[left] ]
def _hosts_in_unenumerated_pattern(self, pattern):
    """ Get all host names matching the pattern (ranges already stripped). """
    # ignore any negative checks here, this is handled elsewhere
    pattern = pattern.replace("!","").replace("&", "")

    results = []
    # A pattern matches a host when it is 'all', matches the group's name,
    # or matches the host's own name.
    for group in self.get_groups():
        for host in group.get_hosts():
            if pattern == 'all' or self._match(group.name, pattern) or self._match(host.name, pattern):
                if host not in results:
                    results.append(host)
    return results
def clear_pattern_cache(self):
    ''' called exclusively by the add_host plugin to allow patterns to be recalculated '''
    # Invalidate all memoized pattern -> hosts results.
    self._pattern_cache = {}
def groups_for_host(self, host):
    """Return the groups that contain the host named *host*, each at most once."""
    results = []
    for group in self.get_groups():
        for hostn in group.get_hosts():
            if host == hostn.name:
                results.append(group)
                # Move on to the next group.  (The previous ``continue`` only
                # advanced the inner loop, so a group whose host list held the
                # same name twice was appended twice.)
                break
    return results
def groups_list(self):
if not self._groups_list:
groups = {}
for g in self.groups:
groups[g.name] = [h.name for h in g.get_hosts()]
ancestors = g.get_ancestors()
for a in ancestors:
if a.name not in grou |
import re, requests, csv, time, traceback
from bs4 import BeautifulSoup
teams = []
for group in ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']:
try:
url = "http://www.oddschecker.com/football/champions-league/champions-league-group-%s/to-qualify" % group
print "getting {}".format(url)
soup = BeautifulSoup(requests.get(url, cookies={"odds_type":" | decimal"}).text)
table = soup.find(attrs={"class":"eventTable"})
sitesrow = table.find_all("tr", {"class": "eventTableHeader"})
sitelinks = sitesrow[0].find_all(lambda t: t.has_attr("title"))
sites = [t["title"] for t in sitelinks]
teamrows = table.find_al | l(attrs={"class": "eventTableRow"})
for row in teamrows:
cols = [t.text for t in row.find_all("td")]
name = cols[1]
if 'any other' in name.lower(): continue
odds = []
isanodd = lambda t: (t.name=="td" and t.has_attr("class") and
('o' in t.attrs["class"] or
'oi' in t.attrs["class"] or
'oo' in t.attrs["class"]))
rawodds = [t.text for t in row.find_all(isanodd)]
for o in rawodds:
if not o or '-' in o: odds.append(None)
else: odds.append(float(o))
assert len(odds) == len(sites), "{} {}".format(odds, sites)
teams.append([name, group] + odds)
except:
print "Unexpected error. skipping"
traceback.print_exc()
t = str(time.time()).split(".")[0]
with file("raw/odds%s.csv" % t, 'w') as outfile:
w = csv.writer(outfile)
w.writerow(['name', 'group'] + sites)
for row in teams:
w.writerow(row)
|
_autoconfig=None):
return client(request).node_group_templates.create(
name=name,
plugin_name=plugin_name,
hadoop_version=hadoop_version,
flavor_id=flavor_id,
description=description,
volumes_per_node=volumes_per_node,
volumes_size=volumes_size,
node_processes=node_processes,
node_configs=node_configs,
floating_ip_pool=floating_ip_pool,
security_groups=security_groups,
auto_security_group=auto_security_group,
availability_zone=availability_zone,
volumes_availability_zone=volumes_availability_zone,
volume_type=volume_type,
is_proxy_gateway=is_proxy_gateway,
volume_local_to_instance=volume_local_to_instance,
use_autoconfig=use_autoconfig)
def nodegroup_template_list(request, search_opts=None):
    """Return all node group templates, filtered by *search_opts* if given."""
    sahara = client(request)
    return sahara.node_group_templates.list(search_opts=search_opts)
def nodegroup_template_get(request, ngt_id):
    """Fetch a single node group template by id."""
    sahara = client(request)
    return sahara.node_group_templates.get(ng_template_id=ngt_id)
def nodegroup_template_find(request, **kwargs):
    """Look up node group templates matching the given attributes."""
    sahara = client(request)
    return sahara.node_group_templates.find(**kwargs)
def nodegroup_template_delete(request, ngt_id):
    """Delete the node group template with the given id."""
    sahara = client(request)
    sahara.node_group_templates.delete(ng_template_id=ngt_id)
def nodegroup_template_update(request, ngt_id, name, plugin_name,
                              hadoop_version, flavor_id,
                              description=None, volumes_per_node=None,
                              volumes_size=None, node_processes=None,
                              node_configs=None, floating_ip_pool=None,
                              security_groups=None, auto_security_group=False,
                              availability_zone=False,
                              volumes_availability_zone=False,
                              volume_type=None,
                              is_proxy_gateway=False,
                              volume_local_to_instance=False,
                              use_autoconfig=None):
    """Update an existing node group template with the supplied fields."""
    fields = dict(
        name=name,
        plugin_name=plugin_name,
        hadoop_version=hadoop_version,
        flavor_id=flavor_id,
        description=description,
        volumes_per_node=volumes_per_node,
        volumes_size=volumes_size,
        node_processes=node_processes,
        node_configs=node_configs,
        floating_ip_pool=floating_ip_pool,
        security_groups=security_groups,
        auto_security_group=auto_security_group,
        availability_zone=availability_zone,
        volumes_availability_zone=volumes_availability_zone,
        volume_type=volume_type,
        is_proxy_gateway=is_proxy_gateway,
        volume_local_to_instance=volume_local_to_instance,
        use_autoconfig=use_autoconfig)
    return client(request).node_group_templates.update(
        ng_template_id=ngt_id, **fields)
def cluster_template_create(request, name, plugin_name, hadoop_version,
                            description=None, cluster_configs=None,
                            node_groups=None, anti_affinity=None,
                            net_id=None, use_autoconfig=None):
    """Create a new cluster template and return it."""
    fields = dict(
        name=name,
        plugin_name=plugin_name,
        hadoop_version=hadoop_version,
        description=description,
        cluster_configs=cluster_configs,
        node_groups=node_groups,
        anti_affinity=anti_affinity,
        net_id=net_id,
        use_autoconfig=use_autoconfig)
    return client(request).cluster_templates.create(**fields)
def cluster_template_list(request, search_opts=None):
    """Return all cluster templates, filtered by *search_opts* if given."""
    sahara = client(request)
    return sahara.cluster_templates.list(search_opts=search_opts)
def cluster_template_get(request, ct_id):
    """Fetch a single cluster template by id."""
    sahara = client(request)
    return sahara.cluster_templates.get(cluster_template_id=ct_id)
def cluster_template_delete(request, ct_id):
    """Delete the cluster template with the given id."""
    sahara = client(request)
    sahara.cluster_templates.delete(cluster_template_id=ct_id)
def cluster_template_update(request, ct_id, name, plugin_name,
                            hadoop_version, description=None,
                            cluster_configs=None, node_groups=None,
                            anti_affinity=None, net_id=None,
                            use_autoconfig=None):
    """Update a cluster template.

    An APIException from sahara is re-raised as a horizon Conflict so the
    dashboard shows a meaningful error.
    """
    try:
        return client(request).cluster_templates.update(
            cluster_template_id=ct_id,
            name=name,
            plugin_name=plugin_name,
            hadoop_version=hadoop_version,
            description=description,
            cluster_configs=cluster_configs,
            node_groups=node_groups,
            anti_affinity=anti_affinity,
            net_id=net_id,
            use_autoconfig=use_autoconfig)
    except APIException as e:
        raise exceptions.Conflict(e)
def cluster_create(request, name, plugin_name, hadoop_version,
                   cluster_template_id=None, default_image_id=None,
                   is_transient=None, description=None, cluster_configs=None,
                   node_groups=None, user_keypair_id=None,
                   anti_affinity=None, net_id=None, use_autoconfig=None):
    """Launch a new cluster, optionally based on a cluster template."""
    fields = dict(
        name=name,
        plugin_name=plugin_name,
        hadoop_version=hadoop_version,
        cluster_template_id=cluster_template_id,
        default_image_id=default_image_id,
        is_transient=is_transient,
        description=description,
        cluster_configs=cluster_configs,
        node_groups=node_groups,
        user_keypair_id=user_keypair_id,
        anti_affinity=anti_affinity,
        net_id=net_id,
        use_autoconfig=use_autoconfig)
    return client(request).clusters.create(**fields)
def cluster_scale(request, cluster_id, scale_object):
    """Scale a cluster according to *scale_object*."""
    sahara = client(request)
    return sahara.clusters.scale(cluster_id=cluster_id,
                                 scale_object=scale_object)
def cluster_list(request, search_opts=None):
    """Return all clusters, filtered by *search_opts* if given."""
    sahara = client(request)
    return sahara.clusters.list(search_opts=search_opts)
def cluster_get(request, cluster_id, show_progress=False):
    """Fetch one cluster, optionally including provisioning progress."""
    sahara = client(request)
    return sahara.clusters.get(cluster_id=cluster_id,
                               show_progress=show_progress)
def cluster_delete(request, cluster_id):
    """Delete the cluster with the given id."""
    sahara = client(request)
    sahara.clusters.delete(cluster_id=cluster_id)
def data_source_create(request, name, description, ds_type, url,
                       credential_user=None, credential_pass=None):
    """Register a new data source (e.g. a swift or HDFS URL)."""
    sahara = client(request)
    return sahara.data_sources.create(name=name,
                                      description=description,
                                      data_source_type=ds_type,
                                      url=url,
                                      credential_user=credential_user,
                                      credential_pass=credential_pass)
def data_source_list(request, search_opts=None):
    """Return all data sources, filtered by *search_opts* if given."""
    sahara = client(request)
    return sahara.data_sources.list(search_opts=search_opts)
def data_source_get(request, ds_id):
    """Fetch a single data source by id."""
    sahara = client(request)
    return sahara.data_sources.get(data_source_id=ds_id)
def data_source_delete(request, ds_id):
    """Delete the data source with the given id."""
    sahara = client(request)
    sahara.data_sources.delete(data_source_id=ds_id)
def data_source_update(request, ds_id, data):
    """Update data source *ds_id* with the fields in *data*."""
    sahara = client(request)
    return sahara.data_sources.update(ds_id, data)
def job_binary_create(request, name, url, description, extra):
    """Create a job binary record pointing at *url*."""
    sahara = client(request)
    return sahara.job_binaries.create(name=name,
                                      url=url,
                                      description=description,
                                      extra=extra)
def job_binary_list(request, search_opts=None):
    """Return all job binaries, filtered by *search_opts* if given."""
    sahara = client(request)
    return sahara.job_binaries.list(search_opts=search_opts)
def job_binary_get(request, jb_id):
    """Fetch a single job binary by id."""
    sahara = client(request)
    return sahara.job_binaries.get(job_binary_id=jb_id)
def job_binary_delete(request, jb_id):
    """Delete the job binary with the given id."""
    sahara = client(request)
    sahara.job_binaries.delete(job_binary_id=jb_id)
def job_binary_get_file(request, jb_id):
    """Download the raw contents of the job binary with the given id."""
    sahara = client(request)
    return sahara.job_binaries.get_file(job_binary_id=jb_id)
def job_binary_update(request, jb_id, data):
    """Update job binary *jb_id* with the fields in *data*."""
    sahara = client(request)
    return sahara.job_binaries.update(jb_id, data)
def job_binary_internal_create(request, name, data):
    """Store *data* in sahara's internal DB as a job binary named *name*."""
    sahara = client(request)
    return sahara.job_binary_internals.create(name=name, data=data)
def job_binary_internal_list(request, search_opts=None):
    """Return all internal job binaries, filtered by *search_opts* if given."""
    sahara = client(request)
    return sahara.job_binary_internals.list(search_opts=search_opts)
def job_binary_internal_get(request, jbi_id):
    """Fetch a single internal job binary by id.

    The keyword is called ``job_binary_id`` even though an *internal*
    binary id is passed; this should be changed in the sahara client
    first and then updated here.
    """
    sahara = client(request)
    return sahara.job_binary_internals.get(job_binary_id=jbi_id)
def job_binary_internal_delete |
from bson.objectid import ObjectId
import json
class Room():
    """In-memory state of one game room.

    The board (``table``) is a JSON-encoded list of row strings where each
    character is one cell: 'p'/'b'/'g'/'r' for the four player colours and
    'e' for empty.  Moves follow Ataxx-like rules: a distance-1 move clones
    the piece, a longer move jumps (clears the origin), and every occupied
    neighbour of the destination is converted to the mover's colour.
    Persistence is handled externally (see RoomsManager); this class only
    mutates in-memory state.
    """
    def __init__(self, players_num, objectid, table, current_color='purple'):
        # A falsy players_num means "derive it from the board" by counting
        # which colour characters occur in the serialized table.
        if players_num:
            self.players_num = players_num
        else:
            self.players_num = 0
            for el in ['p', 'b', 'g', 'r']:
                if el in table:
                    self.players_num += 1
        self.objectid = objectid
        self.current_color = current_color
        # player_id -> presence flag (False = not currently connected)
        self.players_dict = {}
        # NOTE(review): looks like a typo for "already_ex"; kept as-is since
        # external code may reference this attribute name.
        self.alredy_ex = []
        self.colors = []
        self.winner = None
        # Full colour names, in fixed p/b/g/r order, for colours on the board.
        for col in ['p', 'b', 'g', 'r']:
            if col in table:
                self.colors.append(
                    {'p': 'purple',
                     'b': 'blue',
                     'g': 'green',
                     'r': 'red'}[col])
        # Fall back to the first present colour when the requested turn
        # colour does not exist on this board.
        if current_color in self.colors:
            self.current_color = current_color
        else:
            self.current_color = self.colors[0]
        self.users_nicks = {}
        self.color_player_dict = {'purple': None, 'blue': None, 'green': None, 'red': None}
        self.player_color_dict = {}
        self.status = 'waiting'
    def get_player_by_color(self, color):
        """Return the presence flag of the player owning *color*, else None."""
        if color in self.color_player_dict:
            return self.players_dict[self.color_player_dict[color]]
        return None
    def get_color_by_player(self, player_id):
        """Return the colour assigned to *player_id*, or None if unknown."""
        if player_id in self.player_color_dict:
            return self.player_color_dict[player_id]
        return None
    def add_player(self, player_id, name):
        """Register a player and hand them the first unassigned colour."""
        # Presence flag starts False; it is flipped elsewhere on connect.
        self.players_dict[player_id] = False
        self.users_nicks[player_id] = name
        for color in self.colors:
            if not self.color_player_dict[color]:
                self.color_player_dict[color] = player_id
                self.player_color_dict[player_id] = color
                break
    def dell_player(self, player_id):
        """Mark *player_id* as absent (their colour assignment is kept)."""
        self.players_dict[player_id] = False
        return self
    def change_row(self, row, i, to):
        """Return *row* with the character at index *i* replaced by *to*."""
        return row[:i] + to + row[i + 1:]
    def update_table(self, move, table):
        """Apply JSON *move* to JSON *table*; return the new table as JSON.

        Sets ``self.winner`` once the board has no empty cell left.
        """
        print('Table updating')
        pymove = json.loads(move)
        pytable = json.loads(table)
        print('Old table:')
        for row in pytable:
            print(' ', row)
        x0, y0 = int(pymove['X0']), int(pymove['Y0'])
        x1, y1 = int(pymove['X1']), int(pymove['Y1'])
        print('Move from ({}, {}) to ({}, {})'.format(x0, y0, x1, y1))
        # A jump (distance > 1) vacates the origin; a clone keeps it.
        if ((abs(x1 - x0) > 1) or (abs(y1 - y0) > 1)):
            pytable[x0] = self.change_row(pytable[x0], y0, 'e')
        # Convert every occupied neighbour of the destination cell.
        for i in range(-1, 2):
            for j in range(-1, 2):
                if (x1 + i < len(pytable)) and (x1 + i > -1):
                    if (y1 + j < len(pytable[x1])) and (y1 + j > -1):
                        if pytable[x1 + i][y1 + j] != 'e':
                            pytable[x1 + i] = self.change_row(pytable[x1 + i], y1 + j, self.current_color[0].lower())
        pytable[x1] = self.change_row(pytable[x1], y1, self.current_color[0].lower())
        res = json.dumps(pytable)
        # Board full: the colour with the most pieces wins (ties are broken
        # arbitrarily by the tuple sort below).
        if 'e' not in res:
            r_count = (res.count('r'), 'red')
            b_count = (res.count('b'), 'blue')
            g_count = (res.count('g'), 'green')
            p_count = (res.count('p'), 'purple')
            sort_list = [r_count, b_count, p_count, g_count]
            sort_list.sort()
            self.winner = sort_list[-1][1]
        print('New table:')
        for row in pytable:
            print(' ', row)
        return res
    def can_move(self, table):
        """Return True if the current colour has at least one legal move.

        A move exists when some piece of the current colour has an empty
        cell within the surrounding 2-cell box (clone or jump range).
        """
        pytable = json.loads(table)
        for row_id, row in enumerate(pytable):
            for char_id in range(len(row)):
                char = row[char_id]
                if char == self.current_color[0].lower():
                    for i in range(-2, 3):
                        for j in range(-2, 3):
                            if (row_id + i < len(pytable)) and (row_id + i > -1):
                                if (char_id + j < len(row)) and (char_id + j > -1):
                                    if pytable[row_id + i][char_id + j] == 'e':
                                        return True
        return False
    def change_color(self, table):
        """Advance the turn to the next colour that is connected and can move.

        Returns the new colour, or None when nobody can move.
        NOTE(review): indexing uses ``% self.players_num``, which assumes
        players_num == len(self.colors) — confirm for explicitly passed
        players_num values.
        """
        print('Сolor changing')
        colors = self.colors
        self.current_color = colors[
            (colors.index(self.current_color) + 1) % self.players_num]
        i = 1
        # Try at most 5 further advances looking for a playable colour.
        while ((not self.players_dict[self.color_player_dict[self.current_color]]) or (not self.can_move(table))) and (i <= 5):
            self.current_color = colors[
                (colors.index(self.current_color) + 1) % self.players_num]
            i += 1
        if not self.can_move(table):
            return None
        return self.current_color
class RoomsManager():
    """Cache of Room objects keyed by their ObjectId string.

    Rooms are loaded lazily from the Mongo ``rooms`` collection on first
    access and kept in memory afterwards.
    """
    def __init__(self, db):
        # dict of rooms by their obj_id
        self.db = db
        self.rooms_dict = {}
    def get_room(self, objectid):
        """Return the cached Room for *objectid*, loading it from the DB
        on a cache miss; None when no such room exists."""
        if objectid not in self.rooms_dict:
            record = self.db.rooms.find_one({'_id': ObjectId(objectid)})
            if not record:
                return None
            print('Room', objectid, 'extrapolated from db')
            room = Room(
                int(record['players_num']), objectid, record['table'])
            room.current_color = record['current_color']
            # Rebuild the colour/nick assignments from the stored document.
            for user_id in record['players']:
                player = record['players'][user_id]
                room.color_player_dict[player['color']] = user_id
                room.player_color_dict[user_id] = player['color']
                room.users_nicks[user_id] = player['nick']
                room.players_dict[user_id] = None
            self.rooms_dict[objectid] = room
        return self.rooms_dict[objectid]
    def add_room(self, room):
        """Insert an already-constructed Room into the cache."""
        self.rooms_dict[room.objectid] = room
    def rooms(self):
        """Yield every cached Room."""
        for room in self.rooms_dict.values():
            yield room
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# anchor extraction from html document
from bs4 import BeautifulSoup
import urllib2
import urlparse
import re
import csv
import time
cityId='712'
webpage = urllib2.urlopen('http://domofoto.ru/list.php?cid='+cityId)
soup = BeautifulSoup(webpage)
maxST=0
for element in soup.find_all('a'):
url=element.get('href', '/')
if url.find('st')>0: #TODO rename
par = urlparse.parse_qs(urlparse.urlparse(url).query)
currentST=int(par['st'][0])
if currentST > maxST:
maxST=currentST
#print 'max st='+str(maxST)
#получили смещение максимальной страницы города
#определение списка адресов отдельных страниц домов города
recPerPage=30
pagesCount = maxST // recPerPage
housesPages=[]
for pageST in range(0,pagesCount+1): #
url="http://domofoto.ru/list.php?cid="+cityId+"&st="+str(pageST*recPerPage)
#print url
housesPages.append(url)
#print housesPages
housesIds=[]
housenumber=0
allhousescnt=pagesCount*recPerPage
for housePage in housesPages:
webpage = urllib2.urlopen(housePage)
soup = BeautifulSoup(webpage)
for element in soup.find_all(' | a'):
url=element.get('href', '/')
| if url.find('house')>0: #TODO rename
#print url
houseId=url[7:-1]
#print houseId
housesIds.append(houseId)
#print housesIds
webpage=0
from time import gmtime, strftime
csvFileName='domofoto_'+strftime("%Y-%m-%d-%H-%M-%S", gmtime())
writer = csv.writer(open(csvFileName+'.csv', 'w'))
writer.writerow(['x','y','projectCode','projectName','seriesCode','seriesName','constructionStartDate','constructionEndDate','mode','levels'])
#write vrt file for open csv in ogr2ogr
vrt_file='''
<OGRVRTDataSource>
<OGRVRTLayer name="'''+csvFileName+'''">
<LayerSRS>WGS84</LayerSRS>
<SrcDataSource>'''+csvFileName+'''.csv</SrcDataSource>
<GeometryType>wkbPoint</GeometryType>
<GeometryField encoding="PointFromColumns" x="x" y="y"/>
</OGRVRTLayer>
</OGRVRTDataSource>
'''
vrtf = open(csvFileName+".vrt","w")
vrtf.write(vrt_file)
vrtf.close()
for houseId in housesIds:
housenumber=housenumber+1 #for progress display
housePageURL='http://domofoto.ru/house/'+houseId+'/'
print housePageURL + ' ' +str(housenumber) + '/'+str(allhousescnt)
constructionEndDate=''
constructionStartDate=''
seriesCode=''
seriesName=''
projectCode=''
projectName=''
mode=''
levels=''
webpage = urllib2.urlopen(housePageURL)
#soup = BeautifulSoup(webpage)
html = webpage.read()
coordsPart=re.search('initialize\(\[(.+?), ',html)
if coordsPart:
y = coordsPart.group(1)
coordsPart=re.search(',(.+?)\], true',html)
if coordsPart:
x = coordsPart.group(1)
coordsPart=re.search('Проект.*projects/(.+?)/',html)
if coordsPart:
projectCode = coordsPart.group(1)
coordsPart=re.search('Серия.*projects/(.+?)/',html)
if coordsPart:
seriesCode = coordsPart.group(1)
coordsPart=re.search('Проект.*proj.*>(.+?)</a>',html)
if coordsPart:
projectName = coordsPart.group(1)
coordsPart=re.search('Серия.*proj.*>(.+?)</a>',html)
if coordsPart:
seriesName = coordsPart.group(1)
coordsPart=re.search('Окончание.*строительства.*<b>(.+?)</b>',html)
if coordsPart:
constructionEndDate = coordsPart.group(1)
coordsPart=re.search('Начало.*строительства.*<b>(.+?)</b>',html)
if coordsPart:
constructionStartDate = coordsPart.group(1)
coordsPart=re.search('Текущее состояние.* (.+?) </td></tr>',html)
if coordsPart:
mode = coordsPart.group(1)
coordsPart=re.search('Этажность.*d">(.+?)</td></tr>',html)
if coordsPart:
levels = coordsPart.group(1)
row=[x,y,projectCode,projectName,seriesCode,seriesName,constructionStartDate,constructionEndDate,mode,levels]
writer.writerow(row)
#quit()
#print html
ogr2ogrString='''ogr2ogr -overwrite -f "GeoJSON" '''+csvFileName+'''.geojson '''+csvFileName+'''.vrt'''
print ogr2ogrString
|
from xmlrpclib import Fault
from yaml import dump
from twisted.internet.defer import succeed, inlineCallbacks
from juju.errors import ProviderError
from juju.lib.testing import TestCase
from juju.providers.orchestra.machine import OrchestraMachine
from juju.providers.orchestra.tests.common import OrchestraTestMixin
class OrchestraBootstrapTest(TestCase, OrchestraTestMixin):
    """Tests for bootstrapping juju on an Orchestra (cobbler) provider.

    Expectations are recorded on self.proxy_m / self.mocker (set up by
    OrchestraTestMixin.setup_mocks) and replayed before each scenario.
    """
    def mock_verify(self):
        # Bootstrap first proves that the WebDAV storage is writable.
        self.mock_fs_put("http://somewhe.re/webdav/bootstrap-verify",
                         "storage is writable")
    def mock_save_state(self):
        # A successful bootstrap records the zookeeper machine in
        # provider-state.
        data = dump({"zookeeper-instances": ["winston-uid"]})
        self.mock_fs_put("http://somewhe.re/webdav/provider-state", data)
    def mock_surprise_shutdown(self):
        """Expect the full rollback sequence releasing machine 'winston'."""
        self.proxy_m.callRemote("get_systems")
        self.mocker.result(succeed([{
            "uid": "winston-uid",
            "ks_meta": {
                "MACHINE_ID": "blah",
                "USER_DATA_BASE64": "userdata",
                "KEEP": "keep"},
            "mgmt_classes": ["acquired", "PRESERVE"]}]))
        self.proxy_m.callRemote("find_system", {"uid": "winston-uid"})
        self.mocker.result(succeed(["winston"]))
        self.proxy_m.callRemote("get_system_handle", "winston", "TOKEN")
        self.mocker.result(succeed("some-handle"))
        # Rollback strips juju-added ks_meta keys (unrelated keys survive)...
        self.proxy_m.callRemote(
            "modify_system", "some-handle", "ks_meta", {"KEEP": "keep"},
            "TOKEN")
        self.mocker.result(succeed(True))
        # ...returns the machine to the "available" pool...
        self.proxy_m.callRemote(
            "modify_system", "some-handle",
            "mgmt_classes", ["available", "PRESERVE"], "TOKEN")
        self.mocker.result(succeed(True))
        self.proxy_m.callRemote(
            "modify_system", "some-handle", "netboot_enabled", True, "TOKEN")
        self.mocker.result(succeed(True))
        self.proxy_m.callRemote("save_system", "some-handle", "TOKEN")
        self.mocker.result(succeed(True))
        # ...and powers the machine off in the background.
        self.proxy_m.callRemote(
            "background_power_system",
            {"power": "off", "systems": ["winston"]}, "TOKEN")
        self.mocker.result(succeed("ignored"))
    def test_already_bootstrapped(self):
        # If provider-state already names a zookeeper, bootstrap is a no-op
        # that simply returns the existing machine.
        self.setup_mocks()
        self.mock_find_zookeepers(("winston-uid", "winston"))
        self.mocker.replay()
        def verify_machines(machines):
            (machine,) = machines
            self.assertTrue(isinstance(machine, OrchestraMachine))
            self.assertEquals(machine.instance_id, "winston-uid")
        d = self.get_provider().bootstrap()
        d.addCallback(verify_machines)
        return d
    def test_no_machines_available(self):
        # No acceptable system in cobbler -> bootstrap fails cleanly.
        self.setup_mocks()
        self.mock_find_zookeepers()
        self.mock_verify()
        self.mock_get_systems(acceptable=False)
        self.mocker.replay()
        d = self.get_provider().bootstrap()
        self.assertFailure(d, ProviderError)
    def verify_auth_error(self, error):
        # Helper: an error while acquiring a system propagates unchanged.
        self.setup_mocks()
        self.mock_find_zookeepers()
        self.mock_verify()
        self.mock_get_systems()
        self.mock_acquire_system(error)
        self.mocker.replay()
        d = self.get_provider().bootstrap()
        self.assertFailure(d, type(error))
    def test_non_auth_fault(self):
        return self.verify_auth_error(Fault("blah", "some random error"))
    def test_non_auth_error(self):
        return self.verify_auth_error(Exception("fiddlesticks"))
    @inlineCallbacks
    def verify_change_failures(self, **kwargs):
        # Helper: a failure while configuring the acquired machine must
        # trigger the rollback sequence and surface as a ProviderError.
        log = self.capture_logging("juju.orchestra")
        self.setup_mocks()
        self.mock_find_zookeepers()
        self.mock_verify()
        self.mock_get_systems()
        self.mock_acquire_system()
        self.mock_start_system(
            self.get_verify_ks_meta(0, "bootstrap_user_data"), **kwargs)
        self.mock_surprise_shutdown()
        self.mocker.replay()
        d = self.get_provider().bootstrap()
        yield self.assertFailure(d, ProviderError)
        self.assertIn(
            "Failed to launch machine winston-uid; attempting to revert.",
            log.getvalue())
    def test_cannot_modify_machine(self):
        """
        Check that failures when launching the machine cause an (attempt to)
        roll back to an unacquired state.
        """
        return self.verify_change_failures(fail_modify=True)
    def test_cannot_save_machine(self):
        """
        Check that failures when launching the machine cause an (attempt to)
        roll back to an unacquired state.
        """
        return self.verify_change_failures(fail_save=True)
    def test_launch_available_machine(self):
        # Happy path: acquire, start, and record the zookeeper machine.
        self.setup_mocks()
        self.mock_find_zookeepers()
        self.mock_verify()
        self.mock_get_systems()
        self.mock_acquire_system()
        self.mock_start_system(
            self.get_verify_ks_meta(0, "bootstrap_user_data"))
        self.mock_describe_systems(succeed([{
            "uid": "winston-uid",
            "name": "winston",
            "mgmt_classes": ["acquired"],
            "netboot_enabled": True}]))
        self.mock_save_state()
        self.mocker.replay()
        def verify_machines(machines):
            (machine,) = machines
            self.assertTrue(isinstance(machine, OrchestraMachine))
            self.assertEquals(machine.instance_id, "winston-uid")
            self.assertEquals(machine.dns_name, "winston")
        d = self.get_provider().bootstrap()
        d.addCallback(verify_machines)
        return d
|
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
#pylint: skip-file
import numpy as np
from horton import *
def test_atom_si_uks():
    """Check parsing of an unrestricted-KS silicon atom from a CP2K output."""
    mol = IOData.from_file(context.get_fn('test/atom_si.cp2k.out'))
    assert (mol.numbers == [14]).all()
    assert (mol.pseudo_numbers == [4]).all()
    assert (mol.exp_alpha.occupations == [1, 1, 1, 0]).all()
    assert (mol.exp_beta.occupations == [1, 0, 0, 0]).all()
    assert abs(mol.exp_alpha.energies - [-0.398761, -0.154896, -0.154896, -0.154896]).max() < 1e-4
    assert abs(mol.exp_beta.energies - [-0.334567, -0.092237, -0.092237, -0.092237]).max() < 1e-4
    assert abs(mol.energy - -3.761587698067) < 1e-10
    assert (mol.obasis.shell_types == [0, 0, 1, 1, -2]).all()
    overlap = mol.obasis.compute_overlap(mol.lf)._array
    assert abs(np.diag(overlap[:2, :2]) - np.array([0.42921199338707744, 0.32067871530183140])).max() < 1e-5
    # Both spin channels must be orthonormal within the overlap metric.
    eye = np.identity(4)
    for coeffs in (mol.exp_alpha.coeffs, mol.exp_beta.coeffs):
        assert abs(np.dot(coeffs.T, np.dot(overlap, coeffs)) - eye).max() < 1e-5
def test_atom_o_rks():
    """Check parsing of a restricted-KS oxygen atom from a CP2K output."""
    mol = IOData.from_file(context.get_fn('test/atom_om2.cp2k.out'))
    assert (mol.numbers == [8]).all()
    assert (mol.pseudo_numbers == [6]).all()
    assert (mol.exp_alpha.occupations == [1, 1, 1, 1]).all()
    assert abs(mol.exp_alpha.energies - [0.102709, 0.606458, 0.606458, 0.606458]).max() < 1e-4
    assert abs(mol.energy - -15.464982778766) < 1e-10
    assert (mol.obasis.shell_types == [0, 0, 1, 1, -2]).all()
    # Orbitals must be orthonormal within the overlap metric.
    overlap = mol.obasis.compute_overlap(mol.lf)._array
    coeffs = mol.exp_alpha.coeffs
    assert abs(np.dot(coeffs.T, np.dot(overlap, coeffs)) - np.identity(4)).max() < 1e-5
|
#!/usr/bin/python
from cm_api.api_client import ApiResource
import time
api = ApiResource(sys.argv[1], 7180, "acm", "SCALE42secretly", version=15)
cluster = None
try:
cluster = api.get_cluster(name = "ACM Cluster")
except Exception, e:
if e.message[-10:-1].lower() == "not found":
print "<ACM CLUSTER> NOT FOUND ! - n | ot proceeding any further..."
exit()
#Find available parcels...
available_parcels = cluster.get_all_parcels()
CDH_TARGET = None
for p in available_parcels:
if p.product.lower() == "cdh" and p.version[:1] == "5":
CDH_TARGET = { "name" : p.product, "version" : p.version }
break
if | CDH_TARGET is not None:
parcel = cluster.get_parcel(CDH_TARGET['name'] , CDH_TARGET['version'])
if parcel.stage == "ACTIVATED":
print "Parcel <{0}-v{1}> is already <ACTIVATED> across the entire cluster !".format(CDH_TARGET['name'] , CDH_TARGET['version'])
elif parcel.stage == "DISTRIBUTED":
try:
print "Activating <{0}-v{1}> parcel across the cluster !".format(CDH_TARGET['name'] , CDH_TARGET['version'])
parcel.activate()
time.sleep(10)
#Restart the ACM cluster
print "Restarting <{0}> cluster through the cloudera manager !".format(cluster.name)
cluster.stop().wait()
cluster.start().wait()
print "Ready to start rolling with Cloudera Managaer and <ACM Cluster> !"
except Exception, e:
print "Unable to activate parcel <{0}-v{1}> and restart cluster !!! reason : {2}".format(CDH_TARGET['name'] , CDH_TARGET['version'], e.message)
else:
print "We were unable to target any CDH-5 parcel available remotely !" |
と経度の設定は、以下の地図をクリックすることでも可能です:',
'You can click on the map to select the Lat/Lon fields. Longitude is West - East (sideways). Latitude is North-South (Up-Down). Latitude is zero on the equator and positive in the northern hemisphere and negative in the southern hemisphere. Longitude is zero on the prime meridian (Greenwich Mean Time) and is positive to the east, across Europe and Asia. Longitude is negative to the west, across the Atlantic and the Americas. This needs to be added in Decimal Degrees.': '経度/緯度の項目は、地図を選択することでも登録可能です。経度は東西方向(横)の座標軸です。緯度は南北方向(上下)の座標軸です。赤道ではゼロ、北半球ではプラス、南半球ではマイナスとなります。経度は、子午線(グリニッジ標準時)をゼロとして、東(ヨーロッパ、アジア)がプラスとなります。西(大西洋、アメリカ)がマイナスです。10進法で記入してください。',
'You can select the Draw tool (': '選択可能な描画ツール (',
'You can select the Draw tool': 'ドローツールを選択できます',
'You can set the modem settings for SMS here.': 'SMS用モデムの設定をすることができます。',
'You can use the Conversion Tool to convert from either GPS coordinates or Degrees/Minutes/Seconds.': '変換ツールを使うことで、GPS、あるいはDegrees/Minutes/Seconds形式からデータを変換できます。',
'You do no have permission to cancel this received shipment.': '輸送の受け取りをキャンセルする権限がありません',
'You do no have permission to cancel this sent shipment.': '輸送の送付をキャンセルする権限がありません',
'You do no have permission to make this commitment.': 'このコミットを作成する権限がありません',
'You do no have permission to receive this shipment.': 'この輸送を受け取る権限がありません',
'You do no have permission to send this shipment.': 'この輸送を開始する権限がありません',
'You do not have permission for any site to add an inventory item.': 'あなたには他の場所から在庫アイテムを追加する権限はありません',
'You do not have permission for any site to make a commitment.': 'どの場所にも受け入れを示す権限が有りません。',
'You do not have permission for any site to make a request.': '支援要請を作成する権限がありません',
'You do not have permission for any site to perform this action.': 'この操作をするための権限がありません',
'You do not have permission for a | ny site to receive a shipment.': '物資の輸送を受け取る権限がありません',
'You do not have permission for any site to send a shipment.': '物資の輸送をする権限がありません',
'You do not have permission to send a shipment from this site.': 'あなたはこのサイトから物資を送る権限はありません',
'You have a personal map configuration. To change your personal configuration, click ': '個人用地図設定があります。あなたの個人用地図設定を編集するにはクリックしてください',
'You have found a dead body?': '遺体を発見 | しましたか?',
'You must be logged in to register volunteers.': 'ボランティアとして登録するには、ログインする必要があります',
'You must be logged in to report persons missing or found.': '行方不明者の発見状況を登録するには、ログインする必要があります。',
'You must provide a series id to proceed.': '処理を行うにはシリーズIDを指定する必要があります。',
'You should edit OpenStreetMap settings in models/000_config.py': 'OpenStreetMapの設定を変更するには、models/000_config.pyを編集してください',
'You should edit Twitter settings in models/000_config.py': 'Twitter設定を変更するには、models/000_config.pyを編集してください。',
'Your Account is Approved - you can now login\n %s%s/': '利用者登録が完了しました。リンク先のログインページで あなたが登録したユーザー名とパスワードを入力してログインしてください。\n %s%s/',
'Your Account is Approved': '利用者登録が完了しました',
'Your action is required. Please approve user %s asap: ': 'あなたの行動が要求されています。ただちにユーザー %s を承認してください。',
'Your action is required. Please approve user': 'ユーザーから承認の依頼が届いています。承諾お願いします',
'Your current ordered list of solution items is shown below. You can change it by voting again.': '解決項目の順番付きリストは以下です。再度投票することによって変更可能です。',
'Your post was added successfully.': '投稿が成功しました',
'Your system has been assigned a unique identification (UUID), which other computers around you can use to identify you. To view your UUID, you may go to Synchronization -> Sync Settings. You can also see other settings on that page.': 'あなたがお使いのシステムには、ユニークID (UUID) が割り当てられており、このIDを用いて他のコンピュータがあなたのシステムを同定します。あなたの UUID を閲覧するには、同期 -> 同期設定と進んでください。そのページでは、他の設定を閲覧することもできます。',
'ZIP/Postcode': '郵便番号',
'Zinc roof': 'トタン屋根',
'Zoom In: click in the map or use the left mouse button and drag to create a rectangle': 'ズームイン: マップをクリックするか、拡大したい場所をドラッグで選択してください',
'Zoom Levels': 'ズームレベル',
'Zoom Out: click in the map or use the left mouse button and drag to create a rectangle': 'ズームアウト: マップをクリックするか、拡大したい地点をマウスの左ボタンでドラッグしてください',
'Zoom to Current Location': '現在の場所を拡大',
'Zoom to maximum map extent': 'マップの最大範囲までズーム',
'Zoom': 'ズーム',
'act': '活動',
'active': 'アクティブ',
'added': '追加しました',
'all records': '全てのレコード',
'allows a budget to be developed based on staff & equipment costs, including any admin overheads.': 'では、スタッフや設備、それらの管理コストまで含めた予算編成を行ないます。',
'allows for creation and management of surveys to assess the damage following a natural disaster.': '自然災害による被災影響調査の作成、および管理を許可する',
'an individual/team to do in 1-2 days': '個人やチーム単位で、1-2日中に実施するべき事柄をさします。',
'approved': '承認された',
'assigned': '担当者・部門が確定',
'average': '平均的',
'black': '黒',
'blond': 'ブロンド',
'blue': '青',
'brown': '茶色',
'business_damaged': 'ビジネスへの損害',
'by': ' ',
'c/o Name': 'c/o 名前',
'can be used to extract data from spreadsheets and put them into database tables.': 'スプレッドシートからデータを抽出して、データベーステーブルに挿入できます。',
'can use this to identify the Location': 'ここからロケーションの特定が可能です',
'caucasoid': '白人',
'check all': '全てチェック',
'click for more details': '詳細はクリック',
'collateral event': '付帯イベント',
'completed': '完了',
'confirmed': '確認済',
'consider': '考慮',
'criminal intent': '犯罪目的',
'crud': '性病',
'curly': '縮れ毛',
'currently registered': '登録済み',
'daily': '日次',
'dark': '濃い',
'data uploaded': 'データがアップロードされました',
'database %s select': 'データベース%sの選択',
'database': 'データベース',
'db': 'データベース',
'delete all checked': 'チェックされた項目を全て削除',
'deleted': '削除されました',
'denied': '拒否されました',
'description': '説明',
'design': 'デザイン',
'diseased': '罹患中',
'displaced': '避難中',
'divorced': '離別',
'done!': '完了!',
'duplicate': '重複',
'edit': '編集',
'editor': '編集者',
'eg. gas, electricity, water': 'ガス、電気、水道など',
'embedded': '埋め込まれた',
'enclosed area': '専用地',
'export as csv file': 'csvファイルとしてエクスポート',
'fat': '肥満',
'feedback': '現地からの要望',
'female': '女性',
'final report': '最終報告書',
'flush latrine with septic tank': '浄化槽つき水洗トイレ',
'follow-up assessment': 'アセスメントのフォローアップ',
'food_sources': '食糧供給源',
'forehead': 'ひたい',
'form data': 'フォームデータ',
'from Twitter': 'Twitter経由',
'full': '完全',
'getting': '取得中',
'green': '緑',
'grey': '灰色',
'here': 'ここ',
'high': '高い',
'hourly': '1時間毎',
'households': '世帯情報',
'human error': 'ヒューマンエラー',
'identified': '身元確認済み',
'ignore': '無視する',
'immediately': '即応',
'in Deg Min Sec format': 'Deg Min Sec フォーマットで',
'in GPS format': 'GPS フォーマットで',
'in Inv.': '個',
'inactive': '休止中',
'initial assessment': '初期アセスメント',
'injured': '負傷中',
'insert new %s': '%sの新規挿入',
'insert new': '新規挿入',
'invalid request': '無効な要求',
'invalid': '無効',
'is a central online repository where information on all the disaster victims and families, especially identified casualties, evacuees and displaced people can be stored. Information like name, age, contact number, identity card number, displaced location, and other details are captured. Picture and finger print details of the people can be uploaded to the system. People can also be captured by group for efficiency and convenience.': 'は、災害犠牲者とその家族、特に身元の判明した遺体、避難者、難民など、全ての情報を集約可能な中央オンラインレポジトリです。名前、年齢、連絡先番号、IDカード番号、避難した場所、その他の詳細が記録されます。人物の写真や指紋をアップロードすることができます。効率性と利便性のため、人物をグループ分けすることができます。',
'is envisioned to be composed of several sub-modules that work together to provide complex functionality for the management of relief and project items by an organization. This includes an intake system, a warehouse management system, commodity tracking, supply chain management, fleet management, procurement, financial tracking and other asset and resource management capabilities': 'は、支援団体による救援活動や復興プロジェクトの作業を管理するために、複数のサブモジュールを組み合わせて高度な機能を実現しようと考えており、物資の受け入れ、貯蔵設備の管理、必要な物資の記録、サプライチェーン・マネジメント、輸送管理、調達、財務記録、その他様々な資産やリソースの管理といった機能を備えています',
'keeps track of all incoming tickets allowing them to be categorised & routed to the appropriate place for actioning.': '全ての入荷伝票を追跡することで、カテゴリー分けや適切な実行場所への配分を行う',
'kilogram': 'キログラム',
'kit': 'キット',
'latrines': 'トイレ',
'leave empty to detach account': 'アカウントを取り外すには空欄のままにしてください',
'legend URL': '凡例の URL',
'light': '淡い',
'liter': 'リットル',
'locations': 'ロケーション',
'login': 'ログイン',
'long': '長い',
'long>12cm': '12cm以上',
'low': '低い',
'male': '男性',
'manual': 'マニュアル',
'married': '既婚',
'max': '最大',
'maxExtent': '最 |
# -*- coding: utf-8 -*-
# Copyright (C) 2012-2018 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Pub | lic
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
from __future__ import unicode_literals
import argparse
import dnf.conf
import dnf.conf.read
import dnf.exceptions
from dnf.conf import Option, BaseConfig, Conf, RepoConf
import tests.support
from tests.support import mock
class CacheTest(tests.support.TestCase):
    """Cachedir resolution depends on whether we run as root."""

    @mock.patch('dnf.util.am_i_root', return_value=True)
    @mock.patch('dnf.const.SYSTEM_CACHEDIR', '/var/lib/spinning')
    def test_root(self, unused_am_i_root):
        # As root, both the system cachedir and the effective cachedir
        # resolve to SYSTEM_CACHEDIR.
        conf = dnf.conf.Conf()
        self.assertEqual(conf.system_cachedir, '/var/lib/spinning')
        self.assertEqual(conf.cachedir, '/var/lib/spinning')

    @mock.patch('dnf.yum.misc.getCacheDir',
                return_value="/notmp/dnf-walr-yeAH")
    @mock.patch('dnf.util.am_i_root', return_value=False)
    @mock.patch('dnf.const.SYSTEM_CACHEDIR', '/var/lib/spinning')
    def test_noroot(self, fn_root, fn_getcachedir):
        # Non-root: the per-user dir from getCacheDir() is used instead.
        # getCacheDir must not be called until Conf() is instantiated
        # (decorators apply bottom-up, so fn_root is am_i_root and
        # fn_getcachedir is getCacheDir).
        self.assertEqual(fn_getcachedir.call_count, 0)
        conf = dnf.conf.Conf()
        self.assertEqual(conf.cachedir, '/notmp/dnf-walr-yeAH')
        self.assertEqual(fn_getcachedir.call_count, 1)
class ConfTest(tests.support.TestCase):
    """Behavior of dnf.conf.Conf: defaults, config-file parsing,
    command-line overrides, and repo option inheritance."""

    def test_bugtracker(self):
        """The default bugtracker URL points at the Fedora/dnf component."""
        conf = Conf()
        self.assertEqual(conf.bugtracker_url,
                         "https://bugzilla.redhat.com/enter_bug.cgi" +
                         "?product=Fedora&component=dnf")

    def test_conf_from_file(self):
        """Reading the bundled dnf.conf updates only options it sets."""
        conf = Conf()
        # defaults
        self.assertFalse(conf.gpgcheck)
        self.assertEqual(conf.installonly_limit, 3)
        self.assertTrue(conf.clean_requirements_on_remove)
        conf.config_file_path = '%s/etc/dnf/dnf.conf' % tests.support.dnf_toplevel()
        conf.read(priority=dnf.conf.PRIO_MAINCONFIG)
        # gpgcheck flips after the read; the other two keep their values.
        self.assertTrue(conf.gpgcheck)
        self.assertEqual(conf.installonly_limit, 3)
        self.assertTrue(conf.clean_requirements_on_remove)

    def test_overrides(self):
        """Command-line options override defaults; unset ones are untouched."""
        conf = Conf()
        self.assertFalse(conf.assumeyes)
        self.assertFalse(conf.assumeno)
        self.assertEqual(conf.color, 'auto')
        opts = argparse.Namespace(assumeyes=True, color='never')
        conf._configure_from_options(opts)
        self.assertTrue(conf.assumeyes)
        self.assertFalse(conf.assumeno)  # no change
        self.assertEqual(conf.color, 'never')

    def test_order_insensitive(self):
        """CLI options win over the config file regardless of apply order."""
        conf = Conf()
        conf.config_file_path = '%s/etc/dnf/dnf.conf' % tests.support.dnf_toplevel()
        opts = argparse.Namespace(
            gpgcheck=False,
            main_setopts={'installonly_limit': ['5']}
        )
        # read config
        conf.read(priority=dnf.conf.PRIO_MAINCONFIG)
        # update from commandline
        conf._configure_from_options(opts)
        self.assertFalse(conf.gpgcheck)
        self.assertEqual(conf.installonly_limit, 5)
        # and the other way round should have the same result
        # update from commandline
        conf._configure_from_options(opts)
        # read config
        conf.read(priority=dnf.conf.PRIO_MAINCONFIG)
        self.assertFalse(conf.gpgcheck)
        self.assertEqual(conf.installonly_limit, 5)

    def test_inheritance1(self):
        """A RepoConf inherits option values from its parent Conf."""
        conf = Conf()
        repo = RepoConf(conf)
        # minrate is inherited from conf
        # default should be the same
        self.assertEqual(conf.minrate, 1000)
        self.assertEqual(repo.minrate, 1000)
        # after conf change, repoconf still should inherit its value
        conf.minrate = 2000
        self.assertEqual(conf.minrate, 2000)
        self.assertEqual(repo.minrate, 2000)

    def test_inheritance2(self):
        """An explicitly configured repo option stops inheriting from Conf."""
        conf = Conf()
        # if repoconf reads value from config it no more inherits changes from conf
        conf.config_file_path = tests.support.resource_path('etc/repos.conf')
        with mock.patch('logging.Logger.warning'):
            reader = dnf.conf.read.RepoReader(conf, {})
            repo = list(reader)[0]
        self.assertEqual(conf.minrate, 1000)
        self.assertEqual(repo.minrate, 4096)
        # after global change
        conf.minrate = 2000
        self.assertEqual(conf.minrate, 2000)
        self.assertEqual(repo.minrate, 4096)

    def test_prepend_installroot(self):
        """prepend_installroot() rebases a path option under installroot."""
        conf = Conf()
        conf.installroot = '/mnt/root'
        conf.prepend_installroot('persistdir')
        self.assertEqual(conf.persistdir, '/mnt/root/var/lib/dnf')

    def test_ranges(self):
        """Out-of-range option values are rejected with ConfigError."""
        conf = Conf()
        with self.assertRaises(dnf.exceptions.ConfigError):
            conf.debuglevel = '11'
|
# -*- coding: utf-8 -*-
# Copyright 2015 Telefonica Investigación y Desarrollo, S.A.U
#
# This file is part of FIWARE project.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# See the License for the specific language governing permissions and
# limitations under the License.
#
# For those usages not covered by the Apache version 2.0 License please
# contact with opensource@tid.es
__author__ = "@jframos"
from qautils.http.headers_utils import set_representation_headers, HEADER_REPRESENTATION_JSON
from qautils.logger.logger_utils import get_logger
from keystoneclient.v2_0 import Client as KeystoneClient
from fiwarecloto_client.tenantid_resource import TenantIdResourceClient
__logger__ = get_logger(__name__)
# HEADERS
X_AUTH_TOKEN = "X-Auth-Token"
TENANT_ID = "Tenant-Id"
class ClotoClient():
    """REST client for the FIWARE CLOTO component.

    Authenticates against Keystone on construction and exposes factory
    methods that build API resource clients carrying the auth headers.
    """

    def __init__(self, username, password, tenant_id, auth_url, api_protocol, api_host, api_port, api_resource):
        """
        Init a new Client for CLOTO component.
        :param username (string): The username (OpenStack)
        :param password (string): The password
        :param tenant_id (string): TenantID
        :param auth_url (string): Keystone/IdM auth URL
        :param api_protocol (string): API protocol
        :param api_host (string): API host
        :param api_port (string): API port
        :param api_resource (string): API base resource
        :return: None
        """
        __logger__.info("Init CLOTO Client")
        # Security fix: never write the plain-text password to the log.
        __logger__.debug("Client parameters: Username: %s, Password: %s, TenantId: %s, API protocol: %s, API host: %s, "
                         "API port: %s, Base resource: %s", username, "<masked>", tenant_id, api_protocol, api_host,
                         api_port, api_resource)
        self.headers = dict()
        self.api_protocol = api_protocol
        self.api_host = api_host
        self.api_port = api_port
        self.api_resource = api_resource
        # JSON in, JSON out.
        set_representation_headers(self.headers, content_type=HEADER_REPRESENTATION_JSON,
                                   accept=HEADER_REPRESENTATION_JSON)
        self._init_keystone_client(username, password, tenant_id, auth_url)
        self.token = self._get_auth_token()
        # NOTE(review): the token is still logged below; fine for QA
        # environments, but consider masking it too in shared logs.
        __logger__.debug("Token: %s", self.token)
        self.headers.update({X_AUTH_TOKEN: self.token})
        self.headers.update({TENANT_ID: tenant_id})
        __logger__.debug("Headers with OpenStack credentials: %s", self.headers)

    def _init_keystone_client(self, username, password, tenant_id, auth_url):
        """
        Init the keystone client to request token and endpoint data
        :param string username: Username for authentication.
        :param string password: Password for authentication.
        :param string tenant_id: Tenant id.
        :param string auth_url: Keystone service endpoint for authorization.
        :return None
        """
        __logger__.debug("Init Keystone Client")
        self.keystone_client = KeystoneClient(username=username, password=password, tenant_id=tenant_id,
                                              auth_url=auth_url)

    def _get_auth_token(self):
        """
        Get token from Keystone
        :return: Token (String)
        """
        __logger__.debug("Getting auth Token")
        return self.keystone_client.auth_ref['token']['id']

    def get_tenant_id_resource_client(self):
        """
        Create an API resource REST client
        :return: Rest client for 'TenantId' API resource
        """
        __logger__.info("Creating TenantIdResource")
        return TenantIdResourceClient(protocol=self.api_protocol, host=self.api_host,
                                      port=self.api_port, resource=self.api_resource, headers=self.headers)
|
#!/usr/bin/env python
#import logging
from webserver im | port *
if __name__ == '__main__':
    # Disabled logging setup kept for reference; re-enable as needed.
    #logging.basicConfig(
    #    format="[%(asctime)s] %(name)s/%(levelname)-6s - %(message)s",
    #    level=logging.CRITICAL,
    #    datefmt='%Y-%m-%d %H:%M:%S'
    #)
    # Only enable debug level for bbot
    #logger = logging.getLogger('bastardbot')
    #logger.setLevel(logging.DEBUG)
    print("Initializing BastardBot web server...")
    # NOTE(review): BastardBot comes from `webserver import *`; its
    # constructor presumably starts the server and blocks until shutdown
    # (the "stopped" message prints afterwards) — confirm in webserver.
    B = BastardBot()
    print("BastardBot server stopped.")
|
# -*- coding: utf-8 -*-
import logging
# define here the methods needed to be run at install time
def importVarious(context):
    """Install-time hook for sc.blueprints.soundcloud.

    GenericSetup calls this for every profile; bail out unless our
    marker file is present so we only act on our own profile.
    """
    marker = context.readDataFile('sc.blueprints.soundcloud_various.txt')
    if marker is None:
        return
    logger = logging.getLogger('sc.blueprints.soundcloud')
    # add here your custom methods that need to be run when
    # sc.blueprints.soundcloud is installed
|
import os, sys
import json
import copy
import numpy as np
import random
from multiprocessing import Pool
import ipdb
################################################################################################
# Input/output configuration: where the vector-hash files live and the base
# names of the generated treemap / adjacency-matrix JSON files. The
# commented alternatives select the "mantenimiento" dataset instead.
utils_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'nlp scripts')
source_vh_dir = '/home/ronotex/Downloads/vector_hash/ingenierias_mineria'
#source_vh_dir = '/home/ronotex/Downloads/vector_hash/mantenimiento_en_minernia'
#treemap_name = 'carreras_rubro_mina'
#adj_name = 'ing_total_adjmatrix'
treemap_name = 'carreras_mantto_mina'
adj_name = 'mantto_mina_adjmatrix'
class LabelDict(dict):
    """Bidirectional label registry.

    The dict part maps name -> integer id; the parallel ``names`` list
    maps id -> name. Ids are assigned densely in insertion order.
    """

    def __init__(self, label_names=()):
        # Fix: the default used to be a mutable `[]` (shared-default
        # pitfall). It was never mutated, but a tuple is safe by design.
        self.names = []
        for name in label_names:
            self.add(name)

    def add(self, name):
        """Register *name* and return its id; duplicates keep their old id."""
        if name in self:
            # Ignore duplicated label: return the existing id.
            return self[name]
        label_id = len(self.names)
        self[name] = label_id
        self.names.append(name)
        return label_id

    def get_label_name(self, label_id):
        """Return the name registered under *label_id*."""
        return self.names[label_id]

    def get_label_id(self, name):
        """Return the id of *name*, or -1 if it was never registered."""
        if name not in self:
            return -1
        return self[name]

    def size(self):
        """Number of distinct labels registered."""
        return len(self.names)
################################################################################################
hierarchy = json.loads(open('carreras_ing2.json').read())
# docname : {docname : true name}; normalize stray spaces on both sides.
nameByFile = json.loads(open('ident_names2.json').read())
nameByFile = {doc.strip(' '): name.strip(' ') for doc, name in nameByFile.items()}
# Inverse mapping: true name -> docname.
fileByName = {name: doc for doc, name in nameByFile.items()}
################################################################################################
def sorter(T, sizeById, file_dict):
    """Recursively sort each node's children by subtree size (descending)
    and fill in the "size" field of every leaf.

    :param T: treemap node (dict with "name" and optionally "children")
    :param sizeById: sequence of sizes indexed by file id; entries may be
        numbers or preformatted strings (e.g. percentages).
    :param file_dict: LabelDict mapping file names to ids.
    :return: total size of the subtree rooted at T, as float.
    """
    if "children" not in T:
        _id = file_dict.get_label_id(fileByName[T["name"]])
        try:
            T["size"] = int(sizeById[_id])
        except (ValueError, TypeError):
            # Narrowed from a bare `except:`: only conversion failures are
            # expected (e.g. '12.34' strings); anything else propagates.
            T["size"] = sizeById[_id]
        return float(T["size"])
    children = T["children"]
    subtotals = []
    total = 0
    for child in children:
        subtree_sum = sorter(child, sizeById, file_dict)
        total += subtree_sum
        subtotals.append(subtree_sum)
    # Reorder children largest-first (ties broken by original index).
    order = sorted(zip(subtotals, range(len(children))), reverse=True)
    T["children"] = [children[idx] for _, idx in order]
    return total
def getSortedLeaves(T, V, file_dict):
    """Depth-first collect the file ids of all leaf nodes of T into V."""
    if "children" in T:
        for child in T["children"]:
            getSortedLeaves(child, V, file_dict)
    else:
        # Leaf: translate its display name back to a file id.
        V.append(file_dict.get_label_id(fileByName[T["name"]]))
############################## | ##################################################################
################################################################################################
if __name__=='__main__':
    # Build a vector-hash dictionary and a file (career) dictionary from
    # every vector-hash file under source_vh_dir.
    vh_dict = LabelDict()
    file_dict = LabelDict()
    graph = np.zeros([30,30])               # co-occurrence counts between careers
    vhsByFile = [set() for i in range(30)]  # vh ids seen per career
    freq_major = np.zeros([30])             # total vh lines per career
    for root, dirs, filenames in os.walk(source_vh_dir):
        for f in filenames:
            if f[-1] != '~':  # skip editor backup files
                # File names look like 'vh_<name>_mineria'; extract <name>.
                p = f.find('_mineria')
                file_name = f[3:p]
                id_file = file_dict.add(file_name)
                # Fix: close input files deterministically.
                with open(os.path.join(source_vh_dir, f)) as fh:
                    for line in fh:
                        line = line.strip('\n')
                        if line != '':
                            id_vh = vh_dict.add(line)
                            freq_major[id_file] += 1
                            vhsByFile[id_file].add(id_vh)
    count_id_vh = vh_dict.size()
    count_id_file = file_dict.size()
    print(count_id_vh)
    print(count_id_file)
    # Fix: removed leftover `ipdb.set_trace()` debugging breakpoint that
    # halted every run of the script.
    # Off-diagonal entries count shared vector hashes between two careers;
    # the diagonal counts hashes exclusive to that career.
    for k in range(count_id_file):
        outgoing = set()
        for i in range(count_id_file):
            if k != i:
                shared = vhsByFile[k] & vhsByFile[i]
                graph[k, i] = len(shared)
                outgoing |= shared
        graph[k, k] = freq_major[k] - len(outgoing)
    # GENERATE CARRERAS.JSON: absolute counts, then percentages.
    tot = sorter(hierarchy, freq_major, file_dict)
    with open(treemap_name + '.json', 'w') as out:
        out.write(json.dumps(hierarchy, ensure_ascii=False, indent=2))
    per_hierarchy = dict(hierarchy)
    temp = [format(x, '.2f') for x in 100.0 * freq_major / count_id_vh]
    tot = sorter(per_hierarchy, temp, file_dict)
    with open(treemap_name + '_perc.json', 'w') as out:
        out.write(json.dumps(per_hierarchy, ensure_ascii=False, indent=2))
    # GENERATE ADJMATRIX.JSON in the leaf order of the sorted treemap.
    sorted_ids = []
    getSortedLeaves(hierarchy, sorted_ids, file_dict)
    adjmatrix = []
    for k in sorted_ids:
        if freq_major[k] == 0:
            continue
        u = file_dict.get_label_name(k)
        item = dict()
        item["name"] = nameByFile[u]
        item["size"] = int(freq_major[k])
        item["imports"] = []
        for i in sorted_ids:
            if graph[k, i] > 0:
                v = file_dict.get_label_name(i)
                item["imports"].append(
                    {'name': nameByFile[v], 'weight': int(graph[k, i])})
        adjmatrix.append(item)
    with open(adj_name + '.json', 'w') as out:
        out.write(json.dumps(adjmatrix, ensure_ascii=False, indent=2))
pers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <osirpt.sun@gmail.com>
#
import unittest
import numpy
from pyscf import gto
from pyscf import scf
from pyscf import lib
# Hand-written basis for the custom 'Cu1' atom label (parsed from NWChem
# text format: one block per angular momentum shell).
cu1_basis = gto.basis.parse('''
H    S
      1.8000000              1.0000000
H    S
      2.8000000              0.0210870             -0.0045400              0.0000000
      1.3190000              0.3461290             -0.1703520              0.0000000
      0.9059000              0.0393780              0.1403820              1.0000000
H    P
      2.1330000              0.0868660              0.0000000
      1.2000000              0.0000000              0.5000000
      0.3827000              0.5010080              1.0000000
H    D
      0.3827000              1.0000000
H    F
      2.1330000              0.1868660              0.0000000
      0.3827000              0.2010080              1.0000000
''')
# Reference molecule: Cu with an ECP, a custom-basis Cu1, and an He probe.
mol = gto.M(atom='''
Cu1 0. 0. 0.
Cu 0. 1. 0.
He 1. 0. 0.
''',
            basis={'Cu':'lanl2dz', 'Cu1': cu1_basis, 'He':'sto3g'},
            ecp = {'cu':'lanl2dz'})
# mol1/mol2 add a ghost Cu1 displaced by +/-0.0001 along z: the pair is
# used for central finite-difference checks of analytic derivatives.
mol1 = gto.M(atom='''
Cu1 0. 0. 0.
Cu 0. 1. 0.
He 1. 0. 0.
Ghost-Cu1 0. 0. 0.0001
''',
             basis={'Cu':'lanl2dz', 'Cu1': cu1_basis, 'He':'sto3g'},
             ecp = {'cu':'lanl2dz'})
mol2 = gto.M(atom='''
Cu1 0. 0. 0.
Cu 0. 1. 0.
He 1. 0. 0.
Ghost-Cu1 0. 0. -0.0001
''',
             basis={'Cu':'lanl2dz', 'Cu1': cu1_basis, 'He':'sto3g'},
             ecp = {'cu':'lanl2dz'})


def tearDownModule():
    """Free the module-level fixtures when the test module is torn down."""
    global mol, mol1, mol2, cu1_basis
    del mol, mol1, mol2, cu1_basis
class KnownValues(unittest.TestCase):
    def test_ecp_by_shell(self):
        """type1 + type2 shell-pair ECP integrals must match intor_by_shell."""
        for i in (0,2,3,6,9):
            for j in (1,2,3,5,6):
                ref = mol.intor_by_shell('ECPscalar_sph', (i,j))
                dat = gto.ecp.type1_by_shell(mol, (i, j))
                dat+= gto.ecp.type2_by_shell(mol, (i, j))
                self.assertAlmostEqual(abs(ref-dat).max(), 0, 12)
                # Same identity in the cartesian GTO representation.
                ref = mol.intor_by_shell('ECPscalar_cart', (i,j))
                dat = gto.ecp.type1_by_shell(mol, (i, j), cart=True)
                dat+= gto.ecp.type2_by_shell(mol, (i, j), cart=True)
                self.assertAlmostEqual(abs(ref-dat).max(), 0, 12)
    def test_nr_rhf(self):
        """Non-relativistic RHF energy for NaH with a lanl2dz ECP on Na."""
        mol = gto.M(atom='Na 0. 0. 0.; H 0. 0. 1.',
                    basis={'Na':'lanl2dz', 'H':'sto3g'},
                    ecp = {'Na':'lanl2dz'},
                    verbose=0)
        # Fingerprint of the ECP integral matrix, then the SCF energy.
        self.assertAlmostEqual(lib.fp(mol.intor('ECPscalar')), -0.19922134780248762, 9)
        mf = scf.RHF(mol)
        self.assertAlmostEqual(mf.kernel(), -0.45002315563472206, 10)
    def test_bfd(self):
        """RHF reference energies for H, Na, Mg with 'bfd-pp' pseudopotentials."""
        mol = gto.M(atom='H 0. 0. 0.',
                    basis={'H':'bfd-vdz'},
                    ecp = {'H':'bfd-pp'},
                    spin = 1,
                    verbose=0)
        mf = scf.RHF(mol)
        self.assertAlmostEqual(mf.kernel(), -0.499045, 6)
        mol = gto.M(atom='Na 0. 0. 0.',
                    basis={'Na':'bfd-vtz'},
                    ecp = {'Na':'bfd-pp'},
                    spin = 1,
                    verbose=0)
        mf = scf.RHF(mol)
        self.assertAlmostEqual(mf.kernel(), -0.181799, 6)
        mol = gto.M(atom='Mg 0. 0. 0.',
                    basis={'Mg':'bfd-vtz'},
                    ecp = {'Mg':'bfd-pp'},
                    spin = 0,
                    verbose=0)
        mf = scf.RHF(mol)
        self.assertAlmostEqual(mf.kernel(), -0.784579, 6)
        # Ne case kept disabled by the original authors.
#        mol = gto.M(atom='Ne 0. 0. 0.',
#                    basis={'Ne':'bfd-vdz'},
#                    ecp = {'Ne':'bfd-pp'},
#                    verbose=0)
#        mf = scf.RHF(mol)
#        self.assertAlmostEqual(mf.kernel(), -34.709059, 6)
    def test_ecp_grad(self):
        """Analytic ECP nuclear gradient vs. central finite difference.

        Uses the ghost-Cu1 copies in mol1/mol2 (displaced +/-0.0001 along z)
        to build the numerical reference for each shell pair of atom 0.
        """
        aoslices = mol.aoslice_nr_by_atom()
        ish0, ish1 = aoslices[0][:2]
        for i in range(ish0, ish1):
            for j in range(mol.nbas):
                shls = (i,j)
                # Shift the bra shell index onto the ghost copy.
                shls1 = (shls[0] + mol.nbas, shls[1])
                ref = (mol1.intor_by_shell('ECPscalar_cart', shls1) -
                       mol2.intor_by_shell('ECPscalar_cart', shls1)) / 0.0002 * lib.param.BOHR
                dat = mol.intor_by_shell('ECPscalar_ipnuc_cart', shls, comp=3)
                # Component 2 is d/dz; sign flips because ipnuc differentiates
                # the operator rather than the displaced center.
                self.assertAlmostEqual(abs(-dat[2]-ref).max(), 0, 4)
    def test_ecp_iprinv(self):
        """ECPscalar_iprinv derivative vs. finite difference (local mols).

        Builds its own CuH system plus ghost-displaced copies, shadowing the
        module-level fixtures on purpose.
        """
        mol = gto.M(atom='''
Cu 0. 0. 0.
H 1. 0. 0.
''',
                    basis={'Cu':'lanl2dz', 'H':'ccpvdz'},
                    ecp = {'cu':'lanl2dz'})
        mol1 = gto.M(atom='''
Cu 0. 0. 0.
H 1. 0. 0.
Ghost-Cu 0. 0. 0.0001
''',
                     basis={'Cu':'lanl2dz', 'H':'ccpvdz'},
                     ecp = {'cu':'lanl2dz'})
        mol2 = gto.M(atom='''
Cu 0. 0. 0.
H 1. 0. 0.
Ghost-Cu 0. 0. -0.0001
''',
                     basis={'Cu':'lanl2dz', 'H':'ccpvdz'},
                     ecp = {'cu':'lanl2dz'})
        aoslices = mol.aoslice_nr_by_atom()
        ish0, ish1 = aoslices[0][:2]
        for i in range(ish0, ish1):
            for j in range(mol.nbas):
                shls = (i,j)
                shls1 = (shls[0] + mol.nbas, shls[1])
                ref = (mol1.intor_by_shell('ECPscalar_cart', shls1) -
                       mol2.intor_by_shell('ECPscalar_cart', shls1)) / 0.0002 * lib.param.BOHR
                # rinv is anchored at nucleus 0 for the analytic derivative.
                with mol.with_rinv_at_nucleus(0):
                    dat = mol.intor_by_shell('ECPscalar_iprinv_cart', shls, comp=3)
                self.assertAlmostEqual(abs(-dat[2]-ref).max(), 0, 4)
    def test_ecp_hessian(self):
        """Second ECP derivatives vs. finite difference of first derivatives.

        Checks both bra-side (ipipnuc) and mixed (ipnucip) 9-component
        integrals against numerical differentiation along z.
        """
        aoslices = mol.aoslice_nr_by_atom()
        ish0, ish1 = aoslices[0][:2]
        for i in range(ish0, ish1):
            for j in range(mol.nbas):
                shls = (i,j)
                shls1 = (shls[0] + mol.nbas, shls[1])
                ref =-(mol1.intor_by_shell('ECPscalar_ipnuc_cart', shls1, comp=3) -
                       mol2.intor_by_shell('ECPscalar_ipnuc_cart', shls1, comp=3)) / 0.0002 * lib.param.BOHR
                dat = mol.intor_by_shell('ECPscalar_ipipnuc_cart', shls, comp=9)
                di, dj = dat.shape[1:]
                # Reshape the 9 components into a 3x3 derivative block.
                dat = dat.reshape(3,3,di,dj)
                self.assertAlmostEqual(abs(dat[2]-ref).max(), 0, 3)
        for i in range(mol.nbas):
            for j in range(ish0, ish1):
                shls = (i,j)
                shls1 = (shls[0], shls[1] + mol.nbas)
                ref =-(mol1.intor_by_shell('ECPscalar_ipnuc_cart', shls1, comp=3) -
                       mol2.intor_by_shell('ECPscalar_ipnuc_cart', shls1, comp=3)) / 0.0002 * lib.param.BOHR
                dat = mol.intor_by_shell('ECPscalar_ipnucip_cart', shls, comp=9)
                di, dj = dat.shape[1:]
                dat = dat.reshape(3,3,di,dj)
                self.assertAlmostEqual(abs(dat[:,2]-ref).max(), 0, 3)
def test_pp_int(self):
from pyscf import gto, scf
from pyscf.pbc import gto as pbcgto
from pyscf.pbc import scf as pbcscf
from pyscf.pbc import df
cell = pbcgto.Cell()
cell.atom = 'He 1. .5 .5; C .1 1.3 2.1'
cell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))],
'C' :'gth-szv',}
cell.pseudo = {'C':'gth-pade',
'He': pbcgto.pseudo.parse('''He
2
0.40000000 3 -1.98934751 -0.75604821 0.95604821
2
0.29482550 3 1.23870466 .855 .3
.71 -1.1
.9
0.32235865 2 2.25670239 -0.39677748
|
import numpy as np
from numpy.testing import (assert_equal, assert_array_almost_equal,
assert_raises)
from skimage.transform._geometric import _stackcopy
from skimage.transform._geometric import GeometricTransform
from skimage.transform import (estimate_transform, matrix_transform,
SimilarityTransform, AffineTransform,
ProjectiveTransform, PolynomialTransform,
PiecewiseAffineTransform)
# Control-point correspondences shared by all transform-estimation tests:
# each SRC row maps to the DST row with the same index.
SRC = np.array([
    [-12.3705, -10.5075],
    [-10.7865, 15.4305],
    [8.6985, 10.8675],
    [11.4975, -9.5715],
    [7.8435, 7.4835],
    [-5.3325, 6.5025],
    [6.7905, -6.3765],
    [-6.1695, -0.8235],
])
DST = np.array([
    [0, 0],
    [0, 5800],
    [4900, 5800],
    [4900, 0],
    [4479, 4580],
    [1176, 3660],
    [3754, 790],
    [1024, 1931],
])
def test_stackcopy():
    """_stackcopy replicates a 2-D array into every layer of a 3-D stack."""
    layers = 4
    x = np.empty((3, 3, layers))
    y = np.eye(3, 3)
    _stackcopy(x, y)
    for i in range(layers):
        assert_array_almost_equal(x[..., i], y)


def test_estimate_transform():
    """estimate_transform accepts known type names and rejects unknown ones."""
    for tform in ('similarity', 'affine', 'projective', 'polynomial'):
        estimate_transform(tform, SRC[:2, :], DST[:2, :])
    assert_raises(ValueError, estimate_transform, 'foobar',
                  SRC[:2, :], DST[:2, :])


def test_matrix_transform():
    """matrix_transform(coords, matrix) equals applying the transform object."""
    tform = AffineTransform(scale=(0.1, 0.5), rotation=2)
    assert_equal(tform(SRC), matrix_transform(SRC, tform._matrix))
def test_similarity_estimation():
    """Similarity estimation: exact fit, least-squares fit, estimate()."""
    # exact solution
    tform = estimate_transform('similarity', SRC[:2, :], DST[:2, :])
    assert_array_almost_equal(tform(SRC[:2, :]), DST[:2, :])
    # Similarity structure: equal diagonal, antisymmetric off-diagonal.
    assert_equal(tform._matrix[0, 0], tform._matrix[1, 1])
    assert_equal(tform._matrix[0, 1], - tform._matrix[1, 0])
    # over-determined
    tform2 = estimate_transform('similarity', SRC, DST)
    assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
    assert_equal(tform2._matrix[0, 0], tform2._matrix[1, 1])
    assert_equal(tform2._matrix[0, 1], - tform2._matrix[1, 0])
    # via estimate method
    tform3 = SimilarityTransform()
    tform3.estimate(SRC, DST)
    assert_array_almost_equal(tform3._matrix, tform2._matrix)


def test_similarity_init():
    """SimilarityTransform round-trips its implicit parameters."""
    # init with implicit parameters
    scale = 0.1
    rotation = 1
    translation = (1, 1)
    tform = SimilarityTransform(scale=scale, rotation=rotation,
                                translation=translation)
    assert_array_almost_equal(tform.scale, scale)
    assert_array_almost_equal(tform.rotation, rotation)
    assert_array_almost_equal(tform.translation, translation)
    # init with transformation matrix
    tform2 = SimilarityTransform(tform._matrix)
    assert_array_almost_equal(tform2.scale, scale)
    assert_array_almost_equal(tform2.rotation, rotation)
    assert_array_almost_equal(tform2.translation, translation)
    # test special case for scale if rotation=0
    scale = 0.1
    rotation = 0
    translation = (1, 1)
    tform = SimilarityTransform(scale=scale, rotation=rotation,
                                translation=translation)
    assert_array_almost_equal(tform.scale, scale)
    assert_array_almost_equal(tform.rotation, rotation)
    assert_array_almost_equal(tform.translation, translation)
def test_affine_estimation():
    """Affine estimation: exact fit from 3 points, least-squares, estimate()."""
    # exact solution
    tform = estimate_transform('affine', SRC[:3, :], DST[:3, :])
    assert_array_almost_equal(tform(SRC[:3, :]), DST[:3, :])
    # over-determined
    tform2 = estimate_transform('affine', SRC, DST)
    assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
    # via estimate method
    tform3 = AffineTransform()
    tform3.estimate(SRC, DST)
    assert_array_almost_equal(tform3._matrix, tform2._matrix)


def test_affine_init():
    """AffineTransform round-trips scale/rotation/shear/translation."""
    # init with implicit parameters
    scale = (0.1, 0.13)
    rotation = 1
    shear = 0.1
    translation = (1, 1)
    tform = AffineTransform(scale=scale, rotation=rotation, shear=shear,
                            translation=translation)
    assert_array_almost_equal(tform.scale, scale)
    assert_array_almost_equal(tform.rotation, rotation)
    assert_array_almost_equal(tform.shear, shear)
    assert_array_almost_equal(tform.translation, translation)
    # init with transformation matrix
    tform2 = AffineTransform(tform._matrix)
    assert_array_almost_equal(tform2.scale, scale)
    assert_array_almost_equal(tform2.rotation, rotation)
    assert_array_almost_equal(tform2.shear, shear)
    assert_array_almost_equal(tform2.translation, translation)


def test_piecewise_affine():
    """Piecewise-affine maps every control point exactly, both directions."""
    tform = PiecewiseAffineTransform()
    tform.estimate(SRC, DST)
    # make sure each single affine transform is exactly estimated
    assert_array_almost_equal(tform(SRC), DST)
    assert_array_almost_equal(tform.inverse(DST), SRC)
def test_projective_estimation():
    """Projective estimation: exact fit from 4 points, least-squares, estimate()."""
    # exact solution
    tform = estimate_transform('projective', SRC[:4, :], DST[:4, :])
    assert_array_almost_equal(tform(SRC[:4, :]), DST[:4, :])
    # over-determined
    tform2 = estimate_transform('projective', SRC, DST)
    assert_array_almost_equal(tform2.inverse(tform2(SRC)), SRC)
    # via estimate method
    tform3 = ProjectiveTransform()
    tform3.estimate(SRC, DST)
    assert_array_almost_equal(tform3._matrix, tform2._matrix)


def test_projective_init():
    """ProjectiveTransform can be rebuilt from its own matrix."""
    tform = estimate_transform('projective', SRC, DST)
    # init with transformation matrix
    tform2 = ProjectiveTransform(tform._matrix)
    assert_array_almost_equal(tform2._matrix, tform._matrix)


def test_polynomial_estimation():
    """Polynomial (order 10) least-squares fit reproduces DST and estimate()."""
    # over-determined
    tform = estimate_transform('polynomial', SRC, DST, order=10)
    assert_array_almost_equal(tform(SRC), DST, 6)
    # via estimate method
    tform2 = PolynomialTransform()
    tform2.estimate(SRC, DST, order=10)
    assert_array_almost_equal(tform2._params, tform._params)


def test_polynomial_init():
    """PolynomialTransform can be rebuilt from its own parameters."""
    tform = estimate_transform('polynomial', SRC, DST, order=10)
    # init with transformation parameters
    tform2 = PolynomialTransform(tform._params)
    assert_array_almost_equal(tform2._params, tform._params)


def test_polynomial_default_order():
    """Default polynomial order is 2."""
    tform = estimate_transform('polynomial', SRC, DST)
    tform2 = estimate_transform('polynomial', SRC, DST, order=2)
    assert_array_almost_equal(tform2._params, tform._params)


def test_polynomial_inverse():
    """PolynomialTransform has no analytic inverse; inverse() must raise."""
    assert_raises(Exception, PolynomialTransform().inverse, 0)
def test_union():
    """Composing with + multiplies matrices; mixed types degrade to projective."""
    tform1 = SimilarityTransform(scale=0.1, rotation=0.3)
    tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
    tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
    tform = tform1 + tform2
    assert_array_almost_equal(tform._matrix, tform3._matrix)
    tform1 = AffineTransform(scale=(0.1, 0.1), rotation=0.3)
    tform2 = SimilarityTransform(scale=0.1, rotation=0.9)
    tform3 = SimilarityTransform(scale=0.1 ** 2, rotation=0.3 + 0.9)
    tform = tform1 + tform2
    assert_array_almost_equal(tform._matrix, tform3._matrix)
    # Affine + Similarity only guarantees a ProjectiveTransform result.
    assert tform.__class__ == ProjectiveTransform


def test_geometric_tform():
    """The GeometricTransform base class is abstract: all hooks raise."""
    tform = GeometricTransform()
    assert_raises(NotImplementedError, tform, 0)
    assert_raises(NotImplementedError, tform.inverse, 0)
    assert_raises(NotImplementedError, tform.__add__, 0)


def test_invalid_input():
    """Constructors validate matrix shape and conflicting argument combos."""
    assert_raises(ValueError, ProjectiveTransform, np.zeros((2, 3)))
    assert_raises(ValueError, AffineTransform, np.zeros((2, 3)))
    assert_raises(ValueError, SimilarityTransform, np.zeros((2, 3)))
    assert_raises(ValueError, AffineTransform,
                  matrix=np.zeros((2, 3)), scale=1)
    assert_raises(ValueError, SimilarityTransform,
                  matrix=np.zeros((2, 3)), scale=1)
    assert_raises(ValueError, PolynomialTransform, np.zeros((3, 3)))
if __name__ == "__main__":
    # NOTE(review): numpy.testing.run_module_suite is deprecated in recent
    # NumPy releases — running via pytest directly is the modern path.
    from numpy.testing import run_module_suite
    run_module_suite()
|
import pyaf.Bench.TS_datasets as tsds  # NOTE(review): appears unused here
import tests.artificial.process_artificial_dataset as art

# Generate and process one artificial daily series: 32 points, moving-average
# trend, 12-step cycle, Box-Cox transform, zero noise, 20 exogenous
# variables, AR order 12.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 12, transform = "BoxCox", sigma = 0.0, exog_count = 20, ar_order = 12);
"""
Given an array of integers, find out whether there are two distinct indices i
and j in the array such that the difference between nums[i] and nums[j] is at
most t and the difference between i and j is at most k.
"""
class Solution(object):
    def containsNearbyAlmostDuplicate(self, nums, k, t):
        """
        Return True iff there are distinct indices i, j with
        |nums[i] - nums[j]| <= t and |i - j| <= k.

        Bucket trick: with buckets of width t+1, two values at most t apart
        land either in the same bucket or in adjacent buckets. A sliding
        window of the last k values keeps |i - j| <= k.

        :type nums: List[int]
        :type k: int
        :type t: int
        :rtype: bool
        """
        if not nums:
            return False
        if k < 1 or t < 0:
            return False
        # bucket index -> most recent value in the window that fell there.
        # Fix: a plain dict replaces the original defaultdict(int), whose
        # int default was never used and depended on a later import.
        buckets = {}
        width = t + 1
        for i, num in enumerate(nums):
            # Floor division buckets negative values correctly too.
            key = num // width
            if key in buckets:
                return True
            if key - 1 in buckets and abs(num - buckets[key - 1]) < width:
                return True
            if key + 1 in buckets and abs(num - buckets[key + 1]) < width:
                return True
            buckets[key] = num
            # Evict the value that just left the k-sized window.
            if i >= k:
                del buckets[nums[i - k] // width]
        return False
# NOTE(review): this import conventionally belongs at the top of the file;
# it works here only because the module body runs before the method is called.
import collections
a = Solution()
# Equal values one index apart -> True
print(a.containsNearbyAlmostDuplicate([-1, -1], 1, 0))
# Equal values are 2 indices apart but k == 1 -> False
print(a.containsNearbyAlmostDuplicate([1, 3, 1], 1, 1))
# |25 - 20| = 5 <= 6 with indices 2 apart -> True
print(a.containsNearbyAlmostDuplicate([10, 20, 30, 25, 50], 2, 6))
"""
Note:
The idea is like the bucket sort algorithm. Suppose we have consecutive buckets
covering the range of nums with each bucket a width of (t+1). If there are two
item with difference <= t, one of the two will happen:
(1) the two in the same bucket
(2) the two in neighbor buckets
https://discuss.leetcode.com/topic/27608/java-python-one-pass-solution-o-n-time-o-n-space-using-buckets
"""
|
import os
import numpy
from numpy import *
import math
from scipy import | integrate, linalg
from matplotlib import pyplot
from pylab import *
class Freestream:
    """Container for uniform freestream flow conditions."""

    def __init__(self, u_inf=1.0, alpha=0.0):
        """Store the freestream speed and angle of attack.

        Parameters
        ----------
        u_inf: float, optional
            Freestream speed; default: 1.0.
        alpha: float, optional
            Angle of attack in degrees; default: 0.0.
        """
        self.u_inf = u_inf
        # Convert once here so all downstream math works in radians.
        self.alpha = alpha*numpy.pi/180.0
|
from pkg_resources import parse_version
from pyramid.interfaces import (
PHASE1_CONFIG,
PHASE2_CONFIG,
)
import venusian
def includeme(config):
    """Pyramid includeme hook: install the migrator and its directives.

    Registers a shared Migrator in the registry, four config directives
    for declaring upgrades, and a request.upgrade(...) convenience method.
    """
    config.registry['migrator'] = Migrator()
    config.add_directive('add_upgrade', add_upgrade)
    config.add_directive('add_upgrade_step', add_upgrade_step)
    config.add_directive('set_upgrade_finalizer', set_upgrade_finalizer)
    config.add_directive(
        'set_default_upgrade_finalizer', set_default_upgrade_finalizer)
    config.add_request_method(upgrade, 'upgrade')
class ConfigurationError(Exception):
    """Invalid migrator configuration (e.g. duplicate registrations)."""
    pass


class UpgradeError(Exception):
    """Base class for errors raised while running an upgrade."""
    pass


class UpgradePathNotFound(UpgradeError):
    """No chain of registered steps connects the two versions."""
    def __str__(self):
        # args: (schema_name, current_version, target_version, reached)
        return "%r from %r to %r (at %r)" % self.args


class VersionTooHigh(UpgradeError):
    """The current version is already newer than the requested target."""
    pass
class Migrator(object):
    """Registry of per-schema migrators, keyed by schema name."""

    def __init__(self):
        self.schema_migrators = {}
        self.default_finalizer = None

    def add_upgrade(self, schema_name, version, finalizer=None):
        """Register a SchemaMigrator for *schema_name* targeting *version*."""
        if schema_name in self.schema_migrators:
            raise ConfigurationError('duplicate schema_name', schema_name)
        chosen = self.default_finalizer if finalizer is None else finalizer
        self.schema_migrators[schema_name] = SchemaMigrator(
            schema_name, version, chosen)

    def upgrade(self, schema_name, value,
                current_version='', target_version=None, **kw):
        """Delegate the upgrade of *value* to the named schema migrator."""
        return self.schema_migrators[schema_name].upgrade(
            value, current_version, target_version, **kw)

    def __getitem__(self, schema_name):
        return self.schema_migrators[schema_name]

    def __contains__(self, schema_name):
        return schema_name in self.schema_migrators
class SchemaMigrator(object):
    """ Manages upgrade steps

    Holds the registered upgrade steps for one schema, finds a chain of
    steps from the current version to a target version, and applies it.
    """
    def __init__(self, name, version, finalizer=None):
        self.__name__ = name
        self.version = version          # latest version this migrator produces
        self.upgrade_steps = {}         # parse_version(source) -> UpgradeStep
        self.finalizer = finalizer      # optional callable run after all steps

    def add_upgrade_step(self, step, source='', dest=None):
        """Register *step* taking *source* to *dest* (default: latest version)."""
        if dest is None:
            dest = self.version
        if parse_version(dest) <= parse_version(source):
            raise ValueError("dest is less than source", dest, source)
        if parse_version(source) in self.upgrade_steps:
            raise ConfigurationError('duplicate step for source', source)
        self.upgrade_steps[parse_version(source)] = UpgradeStep(step, source, dest)

    def upgrade(self, value, current_version='', target_version=None, **kw):
        """Upgrade *value* from current_version to target_version.

        Walks the registered steps version-by-version. Raises VersionTooHigh
        when current is past target, UpgradePathNotFound when the chain
        cannot reach the target.
        """
        if target_version is None:
            target_version = self.version
        if parse_version(current_version) > parse_version(target_version):
            raise VersionTooHigh(self.__name__, current_version, target_version)
        # Try to find a path from current to target versions
        steps = []
        version = current_version
        # If no entry exists for the current_version, fallback to ''
        if parse_version(version) not in self.upgrade_steps:
            try:
                step = self.upgrade_steps[parse_version('')]
            except KeyError:
                pass
            else:
                # Only usable if the catch-all step does not move us backwards.
                if parse_version(step.dest) >= parse_version(version):
                    steps.append(step)
                    version = step.dest
        while parse_version(version) < parse_version(target_version):
            try:
                step = self.upgrade_steps[parse_version(version)]
            except KeyError:
                break
            steps.append(step)
            version = step.dest
        if version != target_version:
            raise UpgradePathNotFound(
                self.__name__, current_version, target_version, version)
        # Apply the steps
        system = {}
        system.update(kw)
        for step in steps:
            # A step may return None to signal "value was mutated in place".
            next_value = step(value, system)
            if next_value is not None:
                value = next_value
        if self.finalizer is not None:
            next_value = self.finalizer(value, system, version)
            if next_value is not None:
                value = next_value
        return value
class UpgradeStep(object):
    """One upgrade callable together with its source and dest versions."""

    def __init__(self, step, source, dest):
        self.step, self.source, self.dest = step, source, dest

    def __call__(self, value, system):
        # Delegate straight to the wrapped callable.
        return self.step(value, system)
# Imperative configuration
def add_upgrade(config, schema_name, version, finalizer=None):
if finalizer is not None:
config.set_upgrade_finalizer(schema_name, finalizer)
def callback():
migrator = config.registry['migrator']
migrator.add_upgrade(schema_name, version)
config.action(
('add_upgrade', schema_name),
callback, order=PHASE2_CONFIG)
def add_upgrade_step(config, schema_name, step, source='', dest=None):
def callback():
migrator = config.registry['migrator']
migrator[schema_name].add_upgrade_step(step, source, dest)
config.action(
('add_upgrade_step', schema_name, parse_version(source)),
callback)
def set_upgrade_finalizer(config, schema_name, finalizer):
    """Configuration directive: attach *finalizer* to *schema_name*'s migrator."""
    def register():
        # Deferred assignment so it happens after the migrator exists.
        config.registry['migrator'][schema_name].finalizer = finalizer

    config.action(('set_upgrade_finalizer', schema_name), register)
def set_default_upgrade_finalizer(config, finalizer):
    """Configuration directive: set the migrator-wide default finalizer."""
    def register():
        # Deferred so it runs against the committed registry.
        config.registry['migrator'].default_finalizer = finalizer

    config.action(
        'set_default_upgrade_finalizer', register, order=PHASE1_CONFIG)
# Declarative configuration
def upgrade_step(schema_name, source='', dest=None):
    """Register an upgrade step declaratively via venusian scanning."""
    def wrapper(step):
        def on_scan(scanner, factory_name, factory):
            # Translated to the imperative directive at scan time.
            scanner.config.add_upgrade_step(schema_name, step, source, dest)
        venusian.attach(step, on_scan, category='migrator')
        return step
    return wrapper
def upgrade_finalizer(schema_name):
    """Register a finalizer declaratively via venusian scanning."""
    def wrapper(finalizer):
        def on_scan(scanner, factory_name, factory):
            # Translated to the imperative directive at scan time.
            scanner.config.set_upgrade_finalizer(schema_name, finalizer)
        venusian.attach(finalizer, on_scan, category='migrator')
        return finalizer
    return wrapper
def default_upgrade_finalizer(finalizer):
    """Register the default finalizer declaratively via venusian scanning."""
    def on_scan(scanner, factory_name, factory):
        # Translated to the imperative directive at scan time.
        scanner.config.set_default_upgrade_finalizer(finalizer)
    venusian.attach(finalizer, on_scan, category='migrator')
    return finalizer
# Upgrade
def upgrade(request, schema_name, value,
            current_version='', target_version=None, **kw):
    """Upgrade *value* via the registry's migrator for *schema_name*.

    Forwards the caller's ``current_version`` and ``target_version`` to
    the migrator, along with the request and any extra keyword arguments.

    Bug fix: the original call hard-coded ``current_version=''`` and
    ``target_version=None``, silently discarding the versions the caller
    passed in.
    """
    migrator = request.registry['migrator']
    return migrator.upgrade(
        schema_name, value, current_version=current_version,
        target_version=target_version, request=request, **kw)
|
import tsp.algorithms
import time
if __name__ == "__main__":
    cities_number = 5
    max_distance = 100
    distances_matrix = tsp.algorithms.get_random_distances_matrix(
        cities_number, max_distance)

    def report(label, solver_class):
        # Shared solve/print/time pattern for all three solvers: solve,
        # then print the path, its total distance, and the wall-clock
        # time taken. Output is identical to the original inline code.
        start = time.time()
        path = solver_class(distances_matrix).solve()
        print(label + " path is " + str(path))
        print("Distance is " + str(
            tsp.algorithms.get_trip_distance(distances_matrix, path)))
        print("Computational time is: {0:.2f} seconds".format(
            time.time() - start))

    report("Optimal", tsp.algorithms.BruteForceTSPSolver)
    report("\nWorst", tsp.algorithms.BruteForceTSPWorstPathSolver)
    report("\nBoltzmann", tsp.algorithms.BoltzmannMachineTSPSolver)
|
import os
import re
from nose.tools import raises
import seqpoet
class TestSequence:
    """Unit tests for :class:`seqpoet.Sequence`."""

    def setup(self):
        # A mixed-case valid DNA string and a string with characters
        # outside the DNA alphabet.
        self.seq1 = 'ACATacacagaATAgagaCacata'
        self.illegal = 'agagcatgcacthisisnotcorrect'

    def test_sequence_length(self):
        seq = seqpoet.Sequence(self.seq1)
        assert len(seq) == len(self.seq1)

    def test_casing(self):
        # Construction should normalize the sequence to lowercase acgt.
        seq = seqpoet.Sequence(self.seq1)
        assert re.match('^[acgt]+$', str(seq))

    def test_reverse_complement(self):
        seq = seqpoet.Sequence(self.seq1)
        short = seqpoet.Sequence('acct')
        assert seq.revcomp() == 'tatgtgtctctattctgtgtatgt', \
            '"{0}" is not "tatgtgtctctattctgtgtatgt"'.format(seq.revcomp().seq)
        assert short.revcomp() == 'aggt', \
            '"{0}" is not "aggt"'.format(short.revcomp().seq)

    def test_str(self):
        seq = seqpoet.Sequence(self.seq1)
        assert str(seq) == self.seq1.lower()

    def test_repr(self):
        seq = seqpoet.Sequence(self.seq1)
        assert repr(seq) == '<Sequence: acata...>'
        assert repr(seq.revcomp()) == '<Sequence: tatgt...>'

    def test_indexing(self):
        seq = seqpoet.Sequence(self.seq1)
        assert seq[4] == 'a'
        assert seq[:5] == 'acata'
        assert seq[-6:] == 'cacata'
        assert seq[4:8] == 'acac'

    def test_equality(self):
        seq = seqpoet.Sequence(self.seq1)
        assert seq == self.seq1.lower()
        assert seq[:3] == seqpoet.Sequence(self.seq1[:3])

    @raises(ValueError)
    def test_illegal_characters(self):
        # Constructing from a non-DNA string must raise ValueError.
        seqpoet.Sequence(self.illegal)
|
from twython import Twython, TwythonError, TwythonAuthError
from .config import app_key, app_secret, screen_name
import unittest
class TwythonAuthTestCase(unittest.TestCase):
    """Exercises Twython's OAuth 1 and OAuth 2 authentication flows."""

    def setUp(self):
        # OAuth 1 clients: valid app keys, bogus app keys, and bogus app
        # keys paired with bogus user tokens.
        self.api = Twython(app_key, app_secret)
        self.bad_api = Twython('BAD_APP_KEY', 'BAD_APP_SECRET')
        self.bad_api_invalid_tokens = Twython('BAD_APP_KEY', 'BAD_APP_SECRET',
                                              'BAD_OT', 'BAD_OTS')
        # OAuth 2 (app-only) clients: valid and bogus app keys.
        self.oauth2_api = Twython(app_key, app_secret, oauth_version=2)
        self.oauth2_bad_api = Twython('BAD_APP_KEY', 'BAD_APP_SECRET',
                                      oauth_version=2)

    def test_get_authentication_tokens(self):
        """Getting authentication tokens with valid credentials works."""
        self.api.get_authentication_tokens(callback_url='http://google.com/',
                                           force_login=True,
                                           screen_name=screen_name)

    def test_get_authentication_tokens_bad_tokens(self):
        """Bad app tokens raise TwythonAuthError when requesting
        authentication tokens."""
        with self.assertRaises(TwythonAuthError):
            self.bad_api.get_authentication_tokens(
                callback_url='http://google.com/')

    def test_get_authorized_tokens_bad_tokens(self):
        """Getting final tokens fails with wrong tokens."""
        with self.assertRaises(TwythonError):
            self.bad_api.get_authorized_tokens('BAD_OAUTH_VERIFIER')

    def test_get_authorized_tokens_invalid_or_expired_tokens(self):
        """Getting final tokens fails when invalid or expired tokens
        have been passed."""
        with self.assertRaises(TwythonError):
            self.bad_api_invalid_tokens.get_authorized_tokens(
                'BAD_OAUTH_VERIFIER')

    def test_get_authentication_tokens_raises_error_when_oauth2(self):
        """With OAuth 2 configured, get_authentication_tokens raises
        TwythonError."""
        with self.assertRaises(TwythonError):
            self.oauth2_api.get_authentication_tokens()

    def test_get_authorization_tokens_raises_error_when_oauth2(self):
        """With OAuth 2 configured, get_authorized_tokens raises
        TwythonError."""
        with self.assertRaises(TwythonError):
            self.oauth2_api.get_authorized_tokens('BAD_OAUTH_VERIFIER')

    def test_obtain_access_token(self):
        """Obtaining an Application Only OAuth 2 access token succeeds."""
        self.oauth2_api.obtain_access_token()

    def test_obtain_access_token_bad_tokens(self):
        """Obtaining an OAuth 2 access token with bad app tokens fails."""
        with self.assertRaises(TwythonAuthError):
            self.oauth2_bad_api.obtain_access_token()

    def test_obtain_access_token_raises_error_when_oauth1(self):
        """With OAuth 1 configured, obtain_access_token raises
        TwythonError."""
        with self.assertRaises(TwythonError):
            self.api.obtain_access_token()
|
holds a mapping of *name* to *Expectation*, where *Expectation* has
the following fields:
*name*:
key name in the song dictionary (equal to the *name* keying ``md_expectations``).
*type*:
a string holding a `validictory <https://github.com/sunlightlabs/validictory>`__ type.
Possible values:
:'string':
str and unicode objects
:'integer':
ints, longs
:'number':
ints, longs and floats
:'boolean':
bools
:'object':
dicts
:'array':
lists and tuples
:'null':
``None``
:'any':
any type is | possible
*mutable*:
``True`` if client can change the value.
*optional*:
``True`` if the key is not guaranteed to be present.
*volatile*:
``True`` if the key's value can change between observations without client mutation.
*depends_on*:
the name of the key we transform to take our value from, or ``None``.
These fields can never be changed: they are automatically set to
a modified form of some other field's value.
See *dependent_transformation* for more information.
*d | ependent_transformation*:
``None``, or a function ``lambda dependent_value: our_value``.
For example, the ``artistNorm`` field is automatically set to the lowercase
of the ``artist`` field.
So, ``artistNorm.depends_on == 'artist'``, and the *dependent_transformation* for
``artistNorm`` can be written as ``lambda artist: artist.lower()``.
*allowed_values*:
sequence of allowed values.
*explanation*:
an explanatory string, typically empty for obvious fields.
The above information is used to generate the documentation below.
If you find an example to clarify these expectations, please `submit an issue
<https://github.com/simon-weber/Unofficial-Google-Music-API/issues>`__.
"""
from collections import defaultdict, namedtuple
# Underlying immutable record for a single metadata key's expectations.
_Expectation = namedtuple(
    '_Expectation',
    [
        'name', 'type', 'mutable', 'optional', 'volatile',
        'depends_on', 'dependent_transformation',
        'allowed_values', 'explanation'
    ]
)


class Expectation(_Expectation):
    """Instantiated to represent information about a single metadata key."""

    # This class just wraps the namedtuple to provide easy construction
    # (defaults for the rarely-used fields) and some methods.

    def __new__(cls, name, type, mutable, optional, volatile=False,
                depends_on=None, dependent_transformation=None,
                allowed_values=None, explanation=''):
        # Call the namedtuple constructor directly. The original
        # ``cls.__bases__[0].__new__(cls, ...)`` breaks (infinite
        # recursion) as soon as Expectation is subclassed, because a
        # subclass's first base is Expectation itself.
        return _Expectation.__new__(
            cls,
            name, type, mutable, optional, volatile,
            depends_on, dependent_transformation,
            allowed_values, explanation
        )

    def get_schema(self):
        """Return a validictory schema for this key."""
        schema = {"type": self.type}
        if self.type == "string":
            schema["blank"] = True  # allow blank strings
        if self.optional:
            schema["required"] = False
        return schema
#: All the expectations.
#: Built by concatenating groups of keys that share the same
#: mutability/optionality settings.
_all_expts = [
    # Client-editable string fields that every song dict carries.
    Expectation(name, 'string', mutable=True, optional=False) for name in
    (
        'composer', 'album', 'albumArtist', 'genre', 'name', 'artist', 'comment',
    )
] + [
    # Client-editable integer fields that may be absent.
    Expectation(name, 'integer', mutable=True, optional=True) for name in
    (
        'disc', 'year', 'track', 'totalTracks', 'totalDiscs', 'explicitType',
    )
] + [
    # Server-managed fields: always present, never client-writable.
    Expectation(name, type_str, mutable=False, optional=False, explanation=explain)
    for (name, type_str, explain) in
    (
        ('durationMillis', 'integer',
         'length of a song in milliseconds.'),
        ('id', 'string',
         'a per-user unique id for this song; sometimes referred to as *server id* or *song id*.'),
        ('creationDate', 'integer', ''),
        ('type', 'integer',
         'An enum: 1: free/purchased, 2: uploaded/not matched, 6: uploaded/matched'),
        ('beatsPerMinute', 'integer',
         "the server does not calculate this - it's just what was in track metadata"),
        ('subjectToCuration', 'boolean', 'meaning unknown.'),
        ('curatedByUser', 'boolean', 'meaning unknown'),
        ('curationSuggested', 'boolean', 'meaning unknown'),
    )
] + [
    # Server-managed fields that are not guaranteed to be present.
    Expectation(name, type_str, mutable=False, optional=True, explanation=explain)
    for (name, type_str, explain) in
    (
        ('storeId', 'string', 'an id of a matching track in the Play Store.'),
        ('reuploading', 'boolean', 'scan-and-match reupload in progress.'),
        ('albumMatchedId', 'string', 'id of matching album in the Play Store?'),
        ('pending', 'boolean', 'unsure; server processing (eg for store match) pending?'),
        ('url', 'string', 'meaning unknown.'),
        ('bitrate', 'integer', "bitrate in kilobytes/second (eg 320)."),
        ('playlistEntryId', 'string', 'identifies position in the context of a playlist.'),
        ('albumArtUrl', 'string', "if present, the url of an image for this song's album art."),
        ('artistMatchedId', 'string', 'id of a matching artist in the Play Store?'),
        ('albumPlaybackTimestamp', 'integer', 'UTC/microsecond timestamp: the last time this album was played?'),
        ('origin', 'array', '???'),
        ('artistImageBaseUrl', 'string', 'like albumArtUrl, but for the artist. May be blank.'),
        ('recentTimestamp', 'integer', 'UTC/microsecond timestamp: meaning unknown.'),
        ('deleted', 'boolean', ''),
        ('matchedId', 'string', 'meaning unknown; related to scan and match?'),
    )
] + [
    # Normalized variants: derived by lowercasing the base field.
    # Note: the lambda ignores ``name`` entirely (it lowercases whatever
    # value is passed in), so comprehension late-binding is not an issue;
    # the ``% name`` in explanation is evaluated eagerly per iteration.
    Expectation(name + 'Norm', 'string', mutable=False, optional=False,
                depends_on=name,
                dependent_transformation=lambda x: x.lower(),
                explanation="automatically set to lowercase of *%s*." % name)
    for name in
    (
        'artist', 'albumArtist', 'album'
    )
] + [
    # Remaining one-off keys.
    # 0, 1, 5: no, down, up thumbs
    Expectation('rating', 'integer', mutable=True,
                optional=False, allowed_values=tuple(range(6)),
                explanation='0 == no thumb, 1 == down thumb, 5 == up thumb.'),
    Expectation('lastPlayed', 'integer', mutable=False, optional=True, volatile=True,
                explanation='UTC/microsecond timestamp'),
    Expectation('playCount', 'integer', mutable=True, optional=False),
    Expectation('title', 'string', mutable=False, optional=False,
                depends_on='name', dependent_transformation=lambda x: x,
                explanation='misleading! automatically set to *name*.'),
    Expectation('titleNorm', 'string', mutable=False, optional=False,
                depends_on='name', dependent_transformation=lambda x: x.lower(),
                explanation='misleading! automatically set to lowercase of *name*.'),
]
#Create the dict for client code. If they look up something we don't know about,
# give them a flexible immutable key.
def _immutable_key():
    """Default factory: a permissive expectation for unknown key names.

    Defined with ``def`` rather than the original assigned lambda
    (PEP 8 E731); behavior is identical.
    """
    return Expectation('unknown', 'any', mutable=False, optional=True)

#: Mapping of metadata key name -> Expectation; unknown names get an
#: immutable, optional, any-typed default entry.
md_expectations = defaultdict(_immutable_key)
for expt in _all_expts:
    md_expectations[expt.name] = expt
#This code is a super-hack. KnownMetadataFields exists _purely_ for documentation.
#We want dynamic documentation based on _all_expts, but __doc__ isn't a writable field
#for non-{function, class, module} objects. So, we create a dummy class and dynamically
#create its docstring to be arbitrary reST that documents our expectations.
def detail_line(e):
    """Given an expectation, return a readable one-line explanation of it.

    Lists whichever of mutable/optional/volatile are truthy, plus the
    depends_on target if any; returns '' when none apply.
    """
    flags = [flag for flag in ('mutable', 'optional', 'volatile')
             if getattr(e, flag, None)]
    if e.depends_on:
        flags.append("depends_on=%s" % e.depends_on)
    if not flags:
        return ''
    return "*(%s)*" % ', '.join(flags)
#Note the hackiness of this class: this string is built only to serve as
# the docstring of the dummy documentation class described above.
dynamic_docs = """
**This class exists only for documentation; do not try to import it.**
Instead, client code should use ``gmusicapi.protocol.metadata.md_expectations``.
See `the code <https://github.com/simon-weber/Unofficial-Google-Music-API/blob
/develop/gmusicapi/protocol/metadata.py>`__ for an explanation of this hack.
Ideas to clean this up are welcomed.
"""
#Create a reST definitio |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.