prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
#!/usr/bin/env python
# encoding: utf-8
"""
Make a grid of synths for a set of attenuations.
2015-04-30 - Created by Jonathan Sick
"""
import argparse
import numpy as np
from starfisher.pipeline import PipelineBase
from androcmd.planes import BasicPhatPlanes
from androcmd.phatpipeline import (
SolarZIsocs, SolarLockfile,
PhatGaussianDust, PhatCrowding)
from androcmd.phatpipeline import PhatCatalog
def main():
    """Entry point: run synth grids for one Av, or sweep a whole Av range.

    If ``--av`` is given, only that single attenuation is processed;
    otherwise every value in ``[0, max_av)`` stepped by ``delta_av`` is run.
    """
    args = parse_args()
    if args.av is not None:
        # A single attenuation value was requested explicitly.
        run_pipeline(brick=args.brick, av=float(args.av), run_fit=args.fit)
    else:
        # Sweep the full grid; only build it when it is actually needed.
        for av in np.arange(0., args.max_av, args.delta_av):
            run_pipeline(brick=args.brick, av=av, run_fit=args.fit)
def parse_args():
    """Parse command-line arguments for the Av-grid synth driver.

    Returns
    -------
    argparse.Namespace
        ``brick`` (int), ``max_av``/``delta_av`` (float grid bounds),
        ``fit`` (bool) and ``av`` (float or None for a full grid sweep).
    """
    parser = argparse.ArgumentParser(
        description="Grid of synths for a set of Av")
    parser.add_argument('brick', type=int)
    parser.add_argument('--max-av', type=float, default=1.5)
    parser.add_argument('--delta-av', type=float, default=0.1)
    parser.add_argument('--fit', action='store_true', default=False)
    # type=float for consistency with --max-av/--delta-av; default None
    # means "sweep the whole grid" (see main()).
    parser.add_argument('--av', type=float, default=None)
    return parser.parse_args()
def run_pipeline(brick=23, av=0., run_fit=False):
    """Build (and optionally fit) a synth pipeline for one brick and one Av.

    Parameters
    ----------
    brick : int
        PHAT brick number used to load the catalog and name the output dir.
    av : float
        Attenuation applied to both the young and old dust components.
    run_fit : bool
        When True, run the three plane fits after building the pipeline.
    """
    dataset = PhatCatalog(brick)
    # Output directory encodes both brick and Av, e.g. "b23_0.50".
    pipeline = Pipeline(root_dir="b{0:d}_{1:.2f}".format(brick, av),
                        young_av=av, old_av=av, av_sigma_ratio=0.25,
                        isoc_args=dict(isoc_kind='parsec_CAF09_v1.2S',
                                       photsys_version='yang'))
    print(pipeline)
    # NOTE(review): "done" is printed before the (optional) fits below run —
    # it marks completion of pipeline construction only; confirm intent.
    print('av {0:.1f} done'.format(av))
    if run_fit:
        pipeline.fit('f475w_f160w', ['f475w_f160w'], dataset)
        pipeline.fit('rgb', ['f475w_f814w_rgb'], dataset)
        pipeline.fit('ms', ['f475w_f814w_ms'], dataset)
class Pipeline(BasicPhatPlanes, SolarZIsocs,
               SolarLockfile, PhatGaussianDust, PhatCrowding, PipelineBase):
    """A pipeline for fitting PHAT bricks with solar metallicity isochrones.

    Behaviour comes entirely from the mixin chain; this class only wires
    the plane/isochrone/lockfile/dust/crowding components together.
    """

    def __init__(self, **kwargs):
        # All configuration is forwarded unchanged to the mixins/PipelineBase.
        super(Pipeline, self).__init__(**kwargs)
if __name__ == '__main__':
main()
|
import os
from rednotebook.util.f | ilesystem import get_journal_title
def test_journal_title():
    """Check that get_journal_title derives the title from the last path part."""
    root = os.path.abspath(os.sep)
    cases = (
        ("/home/my journal", "my journal"),
        ("/my journal/", "my journal"),
        ("/home/name/Journal", "Journal"),
        ("/home/name/jörnal", "jörnal"),
        # The filesystem root has no basename; the title is the root itself.
        (root, root),
    )
    for journal_path, expected_title in cases:
        assert get_journal_title(journal_path) == expected_title
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the spec | ific language governing permissions and
# limitations under the License.
import inspect
import gast
import pytest
from tangent import compile as compile_
from tangent import quoting
def | test_compile():
def f(x):
return x * 2
f = compile_.compile_function(quoting.parse_function(f))
assert f(2) == 4
assert inspect.getsource(f).split('\n')[0] == 'def f(x):'
def f(x):
return y * 2
f = compile_.compile_function(quoting.parse_function(f), {'y': 3})
assert f(2) == 6
def test_function_compile():
    """compile_function must reject inputs that are not a single FunctionDef."""
    # A bare statement (not a module) is the wrong node type entirely.
    with pytest.raises(TypeError):
        compile_.compile_function(quoting.quote('x = y'))
    # A module whose body is not a function definition is a ValueError.
    with pytest.raises(ValueError):
        compile_.compile_function(gast.parse('x = y'))
if __name__ == '__main__':
assert not pytest.main([__file__])
|
from django.ap | ps import AppConfig
class FeaturesConfig(AppConfig):
    """Django app configuration for the `features` app."""

    name = 'features'

    def ready(self):
        # Imported for its side effects: registers the app's signal handlers
        # once Django has finished loading all apps.
        import features.signals
|
# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""multisite_vim
Revision ID: 5246a6bd410f
Revises: 24bec5f211c7
Create Date: 2016-03-22 14:05:15.129330
"""
# revision identifiers, used by Alembic.
revision = '5246a6bd410f'
down_revision = '24bec5f211c7'
from alembic import op
import sqlalchemy as sa
def upgrade(active_plugins=None, options=None):
    """Create the `vims` and `vimauths` tables and link `devices` to a VIM."""
    op.create_table('vims',
        sa.Column('id', sa.String(length=255), nullable=False),
        sa.Column('type', sa.String(length=255), nullable=False),
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('description', sa.String(length=255), nullable=True),
        sa.Column('placement_attr', sa.PickleType(), nullable=True),
        # New VIMs are shared by default.
        sa.Column('shared', sa.Boolean(), server_default=sa.text(u'true'),
                  nullable=False),
        sa.PrimaryKeyConstraint('id'),
        mysql_engine='InnoDB'
    )
    # Credentials are kept in a separate table, one row per VIM auth endpoint.
    op.create_table('vimauths',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('vim_id', sa.String(length=255), nullable=False),
        sa.Column('password', sa.String(length=128), nullable=False),
        sa.Column('auth_url', sa.String(length=255), nullable=False),
        sa.Column('vim_project', sa.PickleType(), nullable=False),
        sa.Column('auth_cred', sa.PickleType(), nullable=False),
        sa.ForeignKeyConstraint(['vim_id'], ['vims.id'], ),
        sa.PrimaryKeyConstraint('id'),
        # One auth record per endpoint URL.
        sa.UniqueConstraint('auth_url')
    )
    # Attach devices to their VIM (non-nullable FK) and give them room for
    # placement metadata.
    op.add_column(u'devices', sa.Column('placement_attr', sa.PickleType(),
                                        nullable=True))
    op.add_column(u'devices', sa.Column('vim_id', sa.String(length=36),
                                        nullable=False))
    op.create_foreign_key(None, 'devices', 'vims', ['vim_id'], ['id'])
def downgrade(active_plugins=None, options=None):
    """Reverse upgrade(): drop the devices FK/columns, then the VIM tables."""
    # FK must go before its column; tables are dropped child-first.
    op.drop_constraint(None, 'devices', type_='foreignkey')
    op.drop_column(u'devices', 'vim_id')
    op.drop_column(u'devices', 'placement_attr')
    op.drop_table('vimauths')
    op.drop_table('vims')
|
"""
Implementation of model
"""
import numpy as np
import numpy.random as npr
from scipy import ndimage
from configuration import get_config
config = get_config()
class LatticeState(object):
    """ Treat 1D list as 2D lattice and handle coupled system

        This helps with simply passing this object to scipy's odeint.

        All per-cell matrices are shaped ``(width, height)`` and indexed as
        ``[i, j]`` with ``i in range(width)`` and ``j in range(height)``.
    """
    def __init__(self, width, height, pacemakers=None):
        """ Initialize lattice

            `pacemakers` is an optional collection of (i, j) cell coordinates
            that may fire spontaneously (see `handle_off_cell`).
        """
        self.width = width
        self.height = height
        # Avoid a shared mutable default argument.
        self.pacemakers = [] if pacemakers is None else pacemakers

        # 3x3 convolution kernel approximating the discrete Laplacian.
        self.discrete_laplacian = np.ones((3, 3)) * 1/2
        self.discrete_laplacian[1, 1] = -4

        # 1 -> firing, 0 -> quiescent/refractory.
        self.state_matrix = np.zeros((width, height))
        # Refractory clock per cell; negative while in ARP.
        self.tau_matrix = np.ones((width, height)) * (-config.t_arp)  # in ARP

    def _update_state_matrix(self, camp, exci):
        """ Compute state matrix value, with
                quiescent/refractory cell -> 0
                firing cell -> 1
        """
        # this function gets executed once per timestep.
        # BUGFIX: iterate i over width and j over height so the indices match
        # the (width, height) matrix shape; the previous loop order transposed
        # the axes and only worked for square lattices.
        for i in range(self.width):
            for j in range(self.height):
                if self.state_matrix[i, j] == 0:  # not firing
                    self.handle_off_cell(i, j, camp, exci)
                else:  # firing
                    self.handle_on_cell(i, j)

    def handle_on_cell(self, i, j):
        """ Handle cell where state_matrix == 1
        """
        self.tau_matrix[i, j] += config.dt
        if self.tau_matrix[i, j] >= 0:  # end of firing reached
            self.state_matrix[i, j] = 0
            self.tau_matrix[i, j] = -config.t_arp

    def handle_off_cell(self, i, j, camp, exci):
        """ Handle cell where state_matrix == 0
        """
        tau = self.tau_matrix[i, j]
        if tau >= 0:  # in RRP
            # Firing threshold decays from c_max towards c_min over the RRP
            # and is lowered further by the cell's excitability.
            A = ((config.t_rrp + config.t_arp) \
                * (config.c_max - config.c_min)) / config.t_rrp
            t = (config.c_max - A * (tau / (tau + config.t_arp))) \
                * (1 - exci[i, j])

            # increase time up to t_rrp
            if tau < config.t_rrp:
                self.tau_matrix[i, j] += config.dt

            # check threshold
            if camp[i, j] > t:
                self.fire_cell(i, j)

            # handle pacemaker: pacemaker cells may fire spontaneously
            if (i, j) in self.pacemakers and npr.random() < config.p:
                self.fire_cell(i, j)
        else:  # in ARP
            self.tau_matrix[i, j] += config.dt

    def fire_cell(self, i, j):
        """ Fire cell `i`x`j`
        """
        self.state_matrix[i, j] = 1
        self.tau_matrix[i, j] = -config.t_f

    def get_size(self):
        """ Return number of cells in underlying system
        """
        return self.width * self.height

    def _state_vec2camp_exci(self, state):
        """ Convert ODE state vector to cAMP and excitability matrices
        """
        flat_camp = state[:self.get_size()]
        flat_exci = state[self.get_size():]

        camp = np.reshape(flat_camp, (self.width, self.height))
        exci = np.reshape(flat_exci, (self.width, self.height))

        return camp, exci

    def _camp_exci2state_vec(self, camp, exci):
        """ Reverse of `_state_vec2camp_exci`
        """
        flat_camp = np.reshape(camp, self.get_size())
        flat_exci = np.reshape(exci, self.get_size())

        return np.append(flat_camp, flat_exci)

    def get_ode(self, state, t):
        """ Return corresponding ODE

            Structure:
                [
                    camp00, camp01, .. ,camp0m, camp10, .., campnm
                    ...
                    exci00, exci01, .. ,exci0m, exci10, .., excinm
                ]
        """
        # parse ODE state
        camp, exci = self._state_vec2camp_exci(state)

        # compute next iteration
        self._update_state_matrix(camp, exci)

        next_camp = np.zeros((self.width, self.height))
        next_exci = np.zeros((self.width, self.height))

        # Diffusion term computed once for the whole lattice.
        laplacian_conv = ndimage.convolve(
            camp, self.discrete_laplacian,
            mode='constant', cval=0.0
        )

        # BUGFIX: same loop-order correction as in _update_state_matrix.
        for i in range(self.width):
            for j in range(self.height):
                # cAMP: decay + production by firing cells + diffusion.
                next_camp[i, j] = -config.gamma * camp[i, j] \
                    + config.r * self.state_matrix[i, j] \
                    + config.D * laplacian_conv[i, j]
                # Excitability grows until it saturates at e_max.
                if exci[i, j] < config.e_max:
                    next_exci[i, j] = config.eta + config.beta * camp[i, j]

        return self._camp_exci2state_vec(next_camp, next_exci)

    def parse_result(self, orig_res):
        """ Parse integration result

            Returns (camp, exci) arrays of shape (width, height, t_range).
        """
        t_range = len(orig_res)
        res = orig_res.T

        flat_camp = res[:self.get_size()].reshape(self.get_size() * t_range)
        flat_exci = res[self.get_size():].reshape(self.get_size() * t_range)

        camp = np.reshape(flat_camp, (self.width, self.height, t_range))
        exci = np.reshape(flat_exci, (self.width, self.height, t_range))

        return camp, exci

    def __repr__(self):
        """ Nice visual representation of lattice
        """
        return '%dx%d' % (self.width, self.height)
|
from unittest import TestCase
class TestImports(TestCase):
_multiprocess_can_split_ = True
def test_coeff2header_imp | ort(self):
import sk_dsp_comm.coeff2header
def test_coeff2header_from(self):
from sk_dsp_comm import coeff2h | eader
def test_digitalcom_import(self):
import sk_dsp_comm.digitalcom
def test_digitalcom_from(self):
from sk_dsp_comm import digitalcom
def test_fec_conv_import(self):
import sk_dsp_comm.fec_conv
def test_fec_conv_from(self):
from sk_dsp_comm import digitalcom
def test_fir_design_helper_import(self):
from sk_dsp_comm import fir_design_helper
def test_fir_design_helper_from(self):
import sk_dsp_comm.fir_design_helper
def test_iir_design_helper_from(self):
from sk_dsp_comm import iir_design_helper
def test_iir_design_helper_import(self):
import sk_dsp_comm.iir_design_helper
def test_multirate_helper_from(self):
from sk_dsp_comm import multirate_helper
def test_multirate_helper_import(self):
import sk_dsp_comm.multirate_helper
def test_sigsys_from(self):
from sk_dsp_comm import sigsys
def test_sigsys_import(self):
import sk_dsp_comm.sigsys
def test_synchronization_from(self):
from sk_dsp_comm import synchronization
def test_synchronization_import(self):
import sk_dsp_comm.synchronization |
#! /usr/bin/env python3
#
# Copyright (c) 2014 Joseph Keshet, Morgan Sonderegger, Thea Knowles
#
# This file is part of Autovot, a package for automatic extraction of
# voice onset time (VOT) from audio files.
#
# Autovot is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# Autovot is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with Autovot. If not, see
# <http://www.gnu.org/licenses/>.
#
# auto_vot_append_files.py : Append set of features and labels
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import zip
from builtins import open
from builtins import int
from future import standard_library
standard_library.install_aliases()
import argparse
from helpers.utilities import *
if __name__ == "__main__":
    # parse arguments
    parser = argparse.ArgumentParser(description='Append set of features and labels')
    parser.add_argument('features_filename', help="front end features filename")
    parser.add_argument('labels_filename', help="front end labels filename")
    parser.add_argument('appended_features_filename', help="front end features filename to be appended")
    parser.add_argument('appended_labels_filename', help="front end labels filename to be appended")
    parser.add_argument("--logging_level", help="Level of verbosity of information printed out by this program ("
                                                "DEBUG, INFO, WARNING or ERROR), in order of increasing verbosity. "
                                                "See http://docs.python.org/2/howto/logging for definitions. ("
                                                "default: %(default)s)", default="INFO")
    args = parser.parse_args()

    logging_defaults(args.logging_level)

    # Read the primary feature/label pair; `with` guarantees the files are
    # closed even on error (the original closed them manually, twice).
    with open(args.features_filename, 'r') as in_features, \
            open(args.labels_filename, 'r') as in_labels:
        # read infra text header: "<num_lines> <num_cols>"
        header = in_labels.readline()
        dims = header.split()
        # pair up feature and label lines
        lines = list(zip(in_features, in_labels))

    if len(lines) != int(dims[0]):
        logging.error("%s and %s are not of the same length or %s is missing a header" % (args.features_filename,
                                                                                          args.labels_filename,
                                                                                          args.labels_filename))
        exit(-1)

    try:
        # Read any previously appended data. BUGFIX: the originals were
        # opened twice and the wrong files (the already-closed inputs) were
        # closed afterwards, leaking the appended file handles.
        with open(args.appended_features_filename, 'r') as app_features, \
                open(args.appended_labels_filename, 'r') as app_labels:
            # read infra text header
            app_header = app_labels.readline()
            app_dims = app_header.split()
            lines.extend(zip(app_features, app_labels))

        # assert header: combined length must match both headers
        if len(lines) != int(dims[0]) + int(app_dims[0]):
            logging.error("Something wrong with the header of %s" % args.appended_labels_filename)
            exit(-1)
    except IOError as exception:
        # BUGFIX: catch IOError/OSError only — a generic Exception has no
        # `errno` attribute. errno 2 (ENOENT) just means there is nothing to
        # append yet, which is fine.
        if exception.errno != 2:
            logging.error("Something wrong with opening %s and %s for reading." % (args.appended_features_filename,
                                                                                   args.appended_labels_filename))

    # Rewrite the appended files with the combined data.
    with open(args.appended_features_filename, 'w') as out_features, \
            open(args.appended_labels_filename, 'w') as out_labels:
        # write labels header
        out_labels.write("%d 2\n" % len(lines))
        # write data
        for x, y in lines:
            out_features.write(x)
            out_labels.write(y)
|
# -*- coding: utf-8 -*-
#
## This file is part of Zenodo.
## Copyright (C) 2012, 2013, 2014 CERN.
##
## Zenodo is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## Zenodo is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Zenodo. If not, see <http://www.gnu.org/licenses/>.
##
## In applying this licence, CERN does not waive the privileges and immunities
## granted to it by virtue of its status as an Intergovernmental Organization
## or submit itself to any jurisdiction.
import os
import shutil
from flask import current_app
from invenio.base.factory import with_app_context
@with_app_context(new_context=True)
def post_handler_database_create(sender, default_data='', *args, **kwargs):
    """Load data after demosite creation."""
    from invenio.modules.communities.models import Community

    # Materialize the collection records for the two built-in communities.
    print(">>> Creating collections for communities...")
    c = Community.query.filter_by(id='zenodo').first()
    c.save_collections()
    c = Community.query.filter_by(id='ecfunded').first()
    c.save_collections()

    # The root collection (id=1) must exclude unpublished/hidden records.
    print(">>> Fixing dbquery for root collection.")
    from invenio.modules.search.models import Collection
    from invenio.ext.sqlalchemy import db
    c = Collection.query.filter_by(id=1).first()
    c.dbquery = '980__a:0->Z AND NOT 980__a:PROVISIONAL AND NOT ' \
                '980__a:PENDING AND NOT 980__a:SPAM AND NOT 980__a:REJECTED ' \
                'AND NOT 980__a:DARK'
    db.session.commit()
@with_app_context(new_context=True)
def clean_data_files(sender, *args, **kwargs):
    """Clean data in directories."""
    # Resolve every configured data directory up front, then wipe and
    # recreate each one.
    directories = [
        current_app.config[key]
        for key in (
            'DEPOSIT_STORAGEDIR',
            'CFG_TMPDIR',
            'CFG_TMPSHAREDDIR',
            'CFG_LOGDIR',
            'CFG_CACHEDIR',
            'CFG_RUNDIR',
            'CFG_BIBDOCFILE_FILEDIR',
        )
    ]
    for directory in directories:
        print(">>> Cleaning {0}".format(directory))
        if os.path.exists(directory):
            shutil.rmtree(directory)
        os.makedirs(directory)
@with_app_context(new_context=True)
def post_handler_demosite_populate(sender, default_data='', *args, **kwargs):
| """Load data after records are created."""
|
with('latex+'):
encoding = encoding[6:]
else:
return None
class Codec(codecs.Codec):
def encode(self,input,errors='strict'):
"""Convert unicode string to latex."""
output = []
for c in input:
if encoding:
try:
output.append(c.encode(encoding))
continue
except:
pass
if ord(c) in latex_equivalents:
output.append(latex_equivalents[ord(c)])
else:
output += ['{\\char', str(ord(c)), '}']
return ''.join(output), len(input)
def decode(self,input,errors='strict'):
"""Convert latex source string to unicode."""
if encoding:
input = unicode(input,encoding,errors)
# Note: we may get buffer objects here.
# It is not permussable to call join on buffer objects
# but we can make them joinable by calling unicode.
# This should always be safe since we are supposed
# to be producing unicode output anyway.
x = map(unicode,_unlatex(input))
return u''.join(x), len(input)
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
return (Codec().encode,Codec().decode,StreamReader,StreamWriter)
def _tokenize(tex):
    """Convert latex source into sequence of single-token substrings.

    A token is: a run of plain text, a control sequence (\\foo), a run of
    digits, a run of dashes, '$$', '/~', or a single character.
    """
    start = 0
    try:
        # skip quickly across boring stuff: _stoppers matches the first
        # position where special handling is needed.
        pos = _stoppers.finditer(tex).next().span()[0]
    except StopIteration:
        # no special characters at all: the whole input is one token
        yield tex
        return

    while 1:
        if pos > start:
            yield tex[start:pos]
            if tex[start] == '\\' and not (tex[pos-1].isdigit() and tex[start+1].isalpha()):
                while pos < len(tex) and tex[pos].isspace():  # skip blanks after csname
                    pos += 1

        while pos < len(tex) and tex[pos] in _ignore:
            pos += 1  # flush control characters
        if pos >= len(tex):
            return
        start = pos
        if tex[pos:pos+2] in {'$$': None, '/~': None}:  # protect ~ in urls
            pos += 2
        elif tex[pos].isdigit():
            # digit runs are one token
            while pos < len(tex) and tex[pos].isdigit():
                pos += 1
        elif tex[pos] == '-':
            # dash runs (-, --, ---) are one token
            while pos < len(tex) and tex[pos] == '-':
                pos += 1
        elif tex[pos] != '\\' or pos == len(tex) - 1:
            # ordinary single character (or a trailing backslash)
            pos += 1
        elif not tex[pos+1].isalpha():
            # single-character control sequence, e.g. \' or \~
            pos += 2
        else:
            # multi-letter control sequence: consume the whole csname
            pos += 1
            while pos < len(tex) and tex[pos].isalpha():
                pos += 1
            # \char and \accent take a numeric argument that belongs to the token
            if tex[start:pos] == '\\char' or tex[start:pos] == '\\accent':
                while pos < len(tex) and tex[pos].isdigit():
                    pos += 1
class _unlatex:
    """Convert tokenized tex into sequence of unicode strings.  Helper for decode()."""

    def __iter__(self):
        """Turn self into an iterator.  It already is one, nothing to do."""
        return self

    def __init__(self, tex):
        """Create a new token converter from a string."""
        self.tex = tuple(_tokenize(tex))  # turn tokens into indexable list
        self.pos = 0                      # index of first unprocessed token
        self.lastoutput = 'x'             # lastoutput must always be nonempty string

    def __getitem__(self, n):
        """Return token at offset n from current pos (None past the end)."""
        p = self.pos + n
        t = self.tex
        return p < len(t) and t[p] or None

    def next(self):
        """Find and return another piece of converted output."""
        if self.pos >= len(self.tex):
            raise StopIteration
        nextoutput = self.chunk()
        # A csname followed directly by a letter would merge; insert a space.
        if self.lastoutput[0] == '\\' and self.lastoutput[-1].isalpha() and nextoutput[0].isalpha():
            nextoutput = ' ' + nextoutput  # add extra space to terminate csname
        self.lastoutput = nextoutput
        return nextoutput

    def chunk(self):
        """Grab another set of input tokens and convert them to an output string."""
        for delta, c in self.candidates(0):
            if c in _l2u:
                # exact match against the latex->unicode table
                self.pos += delta
                return unichr(_l2u[c])
            elif len(c) == 2 and c[1] == 'i' and (c[0], '\\i') in _l2u:
                self.pos += delta  # correct failure to undot i
                return unichr(_l2u[(c[0], '\\i')])
            elif len(c) == 1 and c[0].startswith('\\char') and c[0][5:].isdigit():
                # \charNNN encodes an explicit code point
                self.pos += delta
                return unichr(int(c[0][5:]))

        # nothing matches, just pass through token as-is
        self.pos += 1
        return self[-1]

    def candidates(self, offset):
        """Generate pairs delta,c where c is a token or tuple of tokens from tex
        (after deleting extraneous brackets starting at pos) and delta
        is the length of the tokens prior to bracket deletion.
        """
        t = self[offset]
        if t in _blacklist:
            return
        elif t == '{':
            # strip one layer of balanced braces and recurse
            for delta, c in self.candidates(offset + 1):
                if self[offset + delta + 1] == '}':
                    yield delta + 2, c
        elif t == '\\mbox':
            # \mbox{...} is transparent for matching purposes
            for delta, c in self.candidates(offset + 1):
                yield delta + 1, c
        elif t == '$' and self[offset + 2] == '$':
            # inline math with a single token: $x$
            yield 3, (t, self[offset + 1], t)
        else:
            q = self[offset + 1]
            if q == '{' and self[offset + 3] == '}':
                # accent applied to braced single token, e.g. \'{e}
                yield 4, (t, self[offset + 2])
            elif q:
                # token pair, e.g. \'e
                yield 2, (t, q)
            # finally, the token on its own
            yield 1, t
latex_equivalents = {
0x0009: ' ',
0x000a: '\n',
0x0023: '{\#}',
0x0026: '{\&}',
0x00a0: '{~}',
0x00a1: '{!`}',
0x00a2: '{\\not{c}}',
0x00a3: '{\\pounds}',
0x00a7: '{\\S}',
0x00a8: '{\\"{}}',
0x00a9: '{\\copyright}',
0x00af: '{\\={}}',
0x00ac: '{\\neg}',
0x00ad: '{\\-}',
0x00b0: '{\\mbox{$^\\circ$}}',
0x00b1: '{\\mbox{$\\pm$}}',
0x00b2: '{\\mbox{$^2$}}',
0x00b3: '{\\mbox{$^3$}}',
0x00b4: "{\\'{}}",
0x00b5: '{\\mbox{$\\mu$}}',
0x00b6: '{\\P}',
0x00b7: '{\\mbox{$\\cdot$}}',
0x00b8: '{\\c{}}',
0x00b9: '{\\mbox{$^1$}}',
0x00bf: '{?`}',
0x00c0: '{\\`A}',
0x00c1: "{\\'A}",
0x00c2: '{\\^A}',
0x00c3: '{\\~A}',
0x00c4: '{\\"A}',
0x00c5: '{\\AA}',
0x00c6: '{\\AE}',
0x00c7: '{\\c{C}}',
0x00c8: '{\\`E}',
0x00c9: "{\\'E}",
0x00ca: '{\\^E}',
0x00cb: '{\\"E}',
0x00cc: '{\\`I}',
0x00cd: "{\\'I}",
0x00ce: '{\\^I}',
0x00cf: '{\\"I}',
0x00d1: '{\\~N}',
0x00d2: '{\\`O}',
0x00d3: "{\\'O}",
0x00d4: '{\\^O}',
0x00d5: '{\\~O}',
0x00d6: '{\\"O}',
0x00d7: '{\\mbox{$\\times$}}',
0x00d8: '{\\O}',
0x00d9: '{\\`U}',
0x00da: "{\\'U}",
0x00db: '{\\^U}',
0x00dc: '{\\"U}',
0x00dd: "{\\'Y}",
0x00df: '{\\ss}',
0x00e0: '{\\`a}',
0x00e1: "{\\'a}" | ,
0x00e2: '{\\^a}',
0x00e3: '{\\~a}',
0x00e4: '{\\"a}',
0x00e5: '{\\aa}',
0x00e6: '{\\ae}',
0x00e7: '{\\c{c}}',
0x00e8: '{\ | \`e}',
0x00e9: "{\\'e}",
0x00ea: '{\\^e}',
0x00eb: '{\\"e}',
0x00ec: '{\\`\\i}',
0x00ed: "{\\'\\i}",
0x00ee: '{\\^\\i}',
0x00ef: '{\\"\\i}',
0x00f1: '{\\~n}',
0x00f2: '{\\`o}',
0x00f3: "{\\'o}",
0x00f4: '{\\^o}',
0x00f5: '{\\~o}',
0x00f6: '{\\"o}',
0x00f7: '{\\mbox{$\\div$}}',
0x00f8: '{\\o}',
0x00f9: '{\\`u}',
0x00fa: "{\\'u}",
0x00fb: '{\\^u}',
0x00fc: '{\\"u}',
0x00fd: "{\\'y}",
0x00ff: '{\\"y}',
0x0100: '{\\=A}',
0x0101: '{\\=a}',
0x0102: '{\\u{A}}',
0x0103: '{\\u{a}}',
0x0104: '{\\c{A}}',
0x0105: '{\\c{a}}',
0x0106: "{\\'C}",
0x0107: "{\\'c}",
0x0108: "{\\^C}",
0x0109: "{\\^c}",
0x010a: "{\\.C}",
0x010b: "{\\.c}",
0x010c: "{\\v{C}}",
0x010d: "{\\v{c}}",
0x010e: "{\\v{D}}",
0x010f: "{\\v{d}}",
0x0112: '{\\=E}',
0x0113: '{\\=e}',
0x0114: '{\\u{E}}',
0x0115: '{\\u{e}}',
0 |
Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
'''
TZlibTransport provides a compressed transport and transport factory
class, using the python standard library zlib module to implement
data compression.
'''
from __future__ import division
import zlib
from cStringIO import StringIO
from TTransport import TTransportBase, CReadableTransport
class TZlibTransportFactory(object):
    '''
    Factory transport that builds zlib compressed transports.

    This factory caches the last single client/transport that it was passed
    and returns the same TZlibTransport object that was created.

    This caching means the TServer class will get the _same_ transport
    object for both input and output transports from this factory.
    (For non-threaded scenarios only, since the cache only holds one object)

    The purpose of this caching is to allocate only one TZlibTransport where
    only one is really needed (since it must have separate read/write buffers),
    and makes the statistics from getCompSavings() and getCompRatio()
    easier to understand.
    '''

    # class scoped cache of last transport given and zlibtransport returned
    _last_trans = None
    _last_z = None

    def getTransport(self, trans, compresslevel=9):
        '''Wrap a transport, trans, with the TZlibTransport
        compressed transport class, returning a new
        transport to the caller.

        @param compresslevel: The zlib compression level, ranging
        from 0 (no compression) to 9 (best compression).  Defaults to 9.
        @type compresslevel: int

        This method returns a TZlibTransport which wraps the
        passed C{trans} TTransport derived instance.
        '''
        # Cache hit: same underlying transport as last time -> reuse wrapper.
        if trans == self._last_trans:
            return self._last_z
        ztrans = TZlibTransport(trans, compresslevel)
        self._last_trans = trans
        self._last_z = ztrans
        return ztrans
class TZlibTransport(TTransportBase, CReadableTransport):
    '''
    Class that wraps a transport with zlib, compressing writes
    and decompresses reads, using the python standard
    library zlib module.
    '''

    # Read buffer size for the python fastbinary C extension,
    # the TBinaryProtocolAccelerated class.
    DEFAULT_BUFFSIZE = 4096

    def __init__(self, trans, compresslevel=9):
        '''
        Create a new TZlibTransport, wrapping C{trans}, another
        TTransport derived object.

        @param trans: A thrift transport object, i.e. a TSocket() object.
        @type trans: TTransport
        @param compresslevel: The zlib compression level, ranging
        from 0 (no compression) to 9 (best compression).  Default is 9.
        @type compresslevel: int
        '''
        self.__trans = trans
        self.compresslevel = compresslevel
        # Separate buffers for decompressed reads and pending writes.
        self.__rbuf = StringIO()
        self.__wbuf = StringIO()
        self._init_zlib()
        self._init_stats()

    def _reinit_buffers(self):
        '''
        Internal method to initialize/reset the internal StringIO objects
        for read and write buffers.
        '''
        self.__rbuf = StringIO()
        self.__wbuf = StringIO()

    def _init_stats(self):
        '''
        Internal method to reset the internal statistics counters
        for compression ratios and bandwidth savings.
        '''
        self.bytes_in = 0
        self.bytes_out = 0
        self.bytes_in_comp = 0
        self.bytes_out_comp = 0

    def _init_zlib(self):
        '''
        Internal method for setting up the zlib compression and
        decompression objects.
        '''
        self._zcomp_read = zlib.decompressobj()
        self._zcomp_write = zlib.compressobj(self.compresslevel)

    def getCompRatio(self):
        '''
        Get the current measured compression ratios (in,out) from
        this transport.

        Returns a tuple of:
        (inbound_compression_ratio, outbound_compression_ratio)

        The compression ratios are computed as:
            compressed / uncompressed

        E.g., data that compresses by 10x will have a ratio of: 0.10
        and data that compresses to half of ts original size will
        have a ratio of 0.5

        None is returned if no bytes have yet been processed in
        a particular direction.
        '''
        r_percent, w_percent = (None, None)
        if self.bytes_in > 0:
            r_percent = self.bytes_in_comp / self.bytes_in
        if self.bytes_out > 0:
            w_percent = self.bytes_out_comp / self.bytes_out
        return (r_percent, w_percent)

    def getCompSavings(self):
        '''
        Get the current count of saved bytes due to data
        compression.

        Returns a tuple of:
        (inbound_saved_bytes, outbound_saved_bytes)

        Note: if compression is actually expanding your
        data (only likely with very tiny thrift objects), then
        the values returned will be negative.
        '''
        r_saved = self.bytes_in - self.bytes_in_comp
        w_saved = self.bytes_out - self.bytes_out_comp
        return (r_saved, w_saved)

    def isOpen(self):
        '''Return the underlying transport's open status'''
        return self.__trans.isOpen()

    def open(self):
        """Open the underlying transport"""
        # Fresh stats for each connection.
        self._init_stats()
        return self.__trans.open()

    def listen(self):
        '''Invoke the underlying transport's listen() method'''
        self.__trans.listen()

    def accept(self):
        '''Accept connections on the underlying transport'''
        return self.__trans.accept()

    def close(self):
        '''Close the underlying transport,'''
        # Discard buffered data and reset the codec state for reuse.
        self._reinit_buffers()
        self._init_zlib()
        return self.__trans.close()

    def read(self, sz):
        '''
        Read up to sz bytes from the decompressed bytes buffer, and
        read from the underlying transport if the decompression
        buffer is empty.
        '''
        ret = self.__rbuf.read(sz)
        if len(ret) > 0:
            return ret
        # keep reading from transport until something comes back
        while True:
            if self.readComp(sz):
                break
        ret = self.__rbuf.read(sz)
        return ret

    def readComp(self, sz):
        '''
        Read compressed data from the underlying transport, then
        decompress it and append it to the internal StringIO read buffer

        Returns False when no decompressed bytes are available yet.
        '''
        zbuf = self.__trans.read(sz)
        # Prepend any compressed bytes zlib could not consume last time.
        zbuf = self._zcomp_read.unconsumed_tail + zbuf
        buf = self._zcomp_read.decompress(zbuf)
        self.bytes_in += len(zbuf)
        self.bytes_in_comp += len(buf)
        # Keep unread decompressed data, appending the new chunk.
        old = self.__rbuf.read()
        self.__rbuf = StringIO(old + buf)
        if len(old) + len(buf) == 0:
            return False
        return True

    def write(self, buf):
        '''
        Write some bytes, putting them into the internal write
        buffer for eventual compression.
        '''
        self.__wbuf.write(buf)

    def flush(self):
        '''
        Flush any queued up data in the write buffer and ensure the
        compression buffer is flushed out to the underlying transport
        '''
        wout = self.__wbuf.getvalue()
        if len(wout) > 0:
            zbuf = self._zcomp_write.compress(wout)
            self.bytes_out += len(wout)
            self.bytes_out_comp += len(zbuf)
        else:
            zbuf = ''
        # Z_SYNC_FLUSH guarantees the peer can decompress everything sent so far.
        ztail = self._zcomp_write.flush(zlib.Z_SYNC_FLUSH)
        self.bytes_out_comp += len(ztail)
        if (len(zbuf) + len(ztail)) > 0:
            self.__wbuf = StringIO()
            self.__trans.write(zbuf + ztail)
        self.__trans.flush()

    @property
    def cstringio_buf(self):
        '''Implement the CReadableTransport interface'''
        return self.__rbuf
def cstringio_refill(self, partialread, reqlen):
'''Implement the CReadableTransport interface for refill'''
retstring = partialread
if reqlen < self.DEFAULT_BUFFSIZE:
retstring += self.read(self.DEFAULT_BUFFSIZE)
while len(retstring) < reqlen:
retstring += self.read(reqlen - len(retstring))
self.__rbuf = StringIO(retstring)
return self.__rb |
"""
WSGI config for appscake project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os

# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
_ | _version_info__ = (2, 4, 2)
_ | _version__ = ".".join(map(str, __version_info__))
|
# Python 2 batch checker: for test cases 1..100, read an integer year from
# inp/<i> and write the leap-year verdict ("EVET"/"HAYIR", Turkish for
# yes/no) to out/<i>.
for i in range(1, 101):
    print i
    asd = open("inp/" + str(i), "r")
    s = asd.read()
    s = s[:-1]  # drop the trailing newline before parsing
    n = int(s)
    print n
    # Gregorian leap-year rule: divisible by 4, except century years,
    # which are leap only when divisible by 400.
    if n % 100 != 0:
        if n % 4 == 0:
            s = "EVET" + "\n"
        else:
            s = "HAYIR" + "\n"
    else:
        if n % 400 == 0:
            s = "EVET" + "\n"
            print "400e bolunme!!!"
        else:
            s = "HAYIR" + "\n"
            print "100 e bolnmede hayir"
    asd.close()
    asd = open("out/" + str(i), "w")
    asd.write(s)
    asd.close()
|
from OpenGLCffi.GLES3 import params

# Stub bindings for the NV viewport-array / scissor-array / depth-range-array
# GLES3 extensions.  The @params decorator supplies the actual cffi dispatch;
# the Python bodies are intentionally empty.
@params(api='gles3', prms=['first', 'count', 'v'])
def glViewportArrayvNV(first, count, v):
    pass
@params(api='gles3', prms=['index', 'x', 'y', 'w', 'h'])
def glViewportIndexedfNV(index, x, y, w, h):
    pass
@params(api='gles3', prms=['index', 'v'])
def glViewportIndexedfvNV(index, v):
    pass
@params(api='gles3', prms=['first', 'count', 'v'])
def glScissorArrayvNV(first, count, v):
    pass
@params(api='gles3', prms=['index', 'left', 'bottom', 'width', 'height'])
def glScissorIndexedNV(index, left, bottom, width, height):
    pass
@params(api='gles3', prms=['index', 'v'])
def glScissorIndexedvNV(index, v):
    pass
@params(api='gles3', prms=['first', 'count', 'v'])
def glDepthRangeArrayfvNV(first, count, v):
    pass
@params(api='gles3', prms=['index', 'n', 'f'])
def glDepthRangeIndexedfNV(index, n, f):
    pass
@params(api='gles3', prms=['target', 'index', 'data'])
def glGetFloati_vNV(target, index, data=None):
    # BUG FIX: prms declares a third 'data' (output buffer) argument, but the
    # Python signature omitted it.  Accept it with a default of None so any
    # existing two-argument caller keeps working.
    pass
# Indexed enable/disable/query stubs for the same NV extension family;
# dispatch is provided entirely by the @params decorator.
@params(api='gles3', prms=['target', 'index'])
def glEnableiNV(target, index):
    pass
@params(api='gles3', prms=['target', 'index'])
def glDisableiNV(target, index):
    pass
@params(api='gles3', prms=['target', 'index'])
def glIsEnablediNV(target, index):
    pass
ser creation with invalid password")
# Invite Form
class InviteUserForm(forms.Form):
    # Collects an invitee's contact details and emails them a pre-filled
    # registration link.
    email = forms.EmailField()
    first_name = forms.CharField()
    last_name = forms.CharField()
    def send_invite(self, usertype, request):
        """Email the invitee a /register link carrying name, type and email.

        NOTE(review): the query-string values are interpolated without URL
        escaping -- presumably names/emails never contain '&' or spaces;
        confirm, or switch to urllib.parse.urlencode.
        """
        first_name = self.cleaned_data['first_name']
        last_name = self.cleaned_data['last_name']
        name = first_name + ' ' + last_name
        email = self.cleaned_data['email']
        # HTTP_HOST has no scheme; make the link absolute.
        host = request.META['HTTP_HOST']
        if not re.search(r'http', host):
            host = 'http://' + host
        link = host + '/register?first_name=' + first_name +'&last_name=' + last_name +'&user_type='+ usertype + '&email=' + email
        send_mail('Invite to register for CSS', name + ", you have been invited to register for CSS. Please register using the following link:\n\n "
            + link, 'registration@inviso-css', [self.cleaned_data['email']])
# Registration Form
# @TODO on load, pull fields from query string -> show failure if field not able to be loaded:
# Fields to pull: email, first_name, last_name, user_type
class RegisterUserForm(forms.Form):
    """Registration form; GET requests pre-populate fields from the invite link."""
    first_name = forms.CharField()
    last_name = forms.CharField()
    email = forms.EmailField()
    user_type = forms.CharField()
    password1 = forms.CharField(label='Password', widget=forms.PasswordInput)
    password2 = forms.CharField(label='Confirm Password', widget=forms.PasswordInput)

    def __init__(self, *args, **kwargs):
        # BUG FIX: the original used `is "GET"` -- an identity test against a
        # string literal (interning-dependent, SyntaxWarning on modern
        # Python) -- and an unconditional pop that raised KeyError whenever
        # 'request' was absent.
        is_get = kwargs.pop('request', None) == "GET"
        if is_get:
            # Pop the pre-fill kwargs before delegating so the base Form
            # never sees unexpected keyword arguments.
            self.first_name = kwargs.pop('first_name')
            self.last_name = kwargs.pop('last_name')
            self.user_type = kwargs.pop('user_type')
            self.email = kwargs.pop('email')
        super(RegisterUserForm, self).__init__(*args, **kwargs)
        if is_get:
            # BUG FIX: mutate the per-instance self.fields (available after
            # super().__init__) rather than the class-shared declared_fields,
            # which leaked initial values across every form instance.
            self.fields['first_name'].initial = self.first_name
            self.fields['last_name'].initial = self.last_name
            self.fields['email'].initial = self.email
            self.fields['user_type'].initial = self.user_type
            self.fields['user_type'].disabled = True

    def save(self):
        """Create and persist a CUser from the validated form data."""
        user = CUser.create(email=self.cleaned_data['email'],
                            password=self.cleaned_data['password2'],
                            user_type=self.cleaned_data['user_type'],
                            first_name=self.cleaned_data['first_name'],
                            last_name=self.cleaned_data['last_name'])
        user.save()
        return user
# Edit User Form
class EditUserForm(forms.Form):
    """Updates name and password of the CUser identified by the hidden email."""
    user_email = forms.CharField(widget=forms.HiddenInput(), initial='default@email.com')
    first_name = forms.CharField()
    last_name = forms.CharField()
    password = forms.CharField()

    def save(self):
        """Apply the cleaned values to the targeted user, persist, return it."""
        data = self.cleaned_data
        user = CUser.get_user(email=data['user_email'])
        user.set_first_name(data['first_name'])
        user.set_last_name(data['last_name'])
        user.set_password(data['password'])
        user.save()
        return user
# Delete Form
class DeleteUserForm(forms.Form):
    """Confirmation form deleting the auth User whose username is the email."""
    email = forms.CharField(label='Confirm email')

    def delete_user(self):
        """Delete every User whose username matches the confirmed email."""
        # FIX: the bound local was previously assigned but unused while the
        # filter re-read cleaned_data; use the local once.
        email = self.cleaned_data['email']
        User.objects.filter(username=email).delete()
class AddRoomForm(forms.Form):
    """Creates a new Room from the submitted attributes."""
    name = forms.CharField()
    description = forms.CharField()
    capacity = forms.IntegerField()
    notes = forms.CharField()
    equipment = forms.CharField()

    def save(self):
        """Create, persist and return the new Room."""
        attrs = {field: self.cleaned_data[field]
                 for field in ('name', 'description', 'capacity', 'notes', 'equipment')}
        room = Room.objects.create(**attrs)
        room.save()
        return room
class EditRoomForm(forms.Form):
    """Edits an existing Room, looked up by its (hidden) name field."""
    name = forms.CharField(widget=forms.HiddenInput(), initial='defaultRoom')
    description = forms.CharField()
    capacity = forms.IntegerField()
    notes = forms.CharField()
    equipment = forms.CharField()

    def save(self):
        """Copy every cleaned value onto the Room and persist it."""
        room = Room.get_room(self.cleaned_data['name'])
        for attr in ('name', 'description', 'capacity', 'notes', 'equipment'):
            setattr(room, attr, self.cleaned_data[attr])
        room.save()
class DeleteRoomForm(forms.Form):
    """Deletes the Room named by the hidden field."""
    roomName = forms.CharField(widget=forms.HiddenInput(), initial='defaultRoom')

    def deleteRoom(self):
        """Remove every Room whose name matches the submitted value."""
        Room.objects.filter(name=self.cleaned_data['roomName']).delete()
class EditCourseSectionTypeForm(forms.Form):
    # NOTE(review): save() reads cleaned_data['name'] but no 'name' field is
    # declared on this form, so it would raise KeyError when called -- confirm
    # the intended field set.  It also only binds locals and never persists
    # anything; presumably unfinished.
    work_units = forms.IntegerField()
    work_hours = forms.IntegerField()
    def save(self):
        name = self.cleaned_data['name']
        work_units = self.cleaned_data['work_units']
        work_hours = self.cleaned_data['work_hours']
class AddCourseSectionTypeForm(forms.Form):
    """Associates a section type (radio choice) and its workload with a course."""
    course = forms.CharField(widget=forms.HiddenInput(), initial='defaultCourse')
    name = forms.MultipleChoiceField(
        widget=forms.RadioSelect,
        choices=SectionType.get_all_section_types_list,
        required=True,
    )
    work_units = forms.IntegerField()
    work_hours = forms.IntegerField()
class AddCourseForm(forms.Form):
    """Creates a new Course."""
    course_name = forms.CharField()
    description = forms.CharField()
    equipment_req = forms.CharField()

    def save(self):
        """Create, persist and return the new Course.

        Returns the saved instance for consistency with AddRoomForm.save()
        (previously returned None; callers ignoring the return are unaffected).
        """
        course = Course(name=self.cleaned_data['course_name'],
                        description=self.cleaned_data['description'],
                        equipment_req=self.cleaned_data['equipment_req'])
        course.save()
        return course
class DeleteCourseForm(forms.Form):
    """Deletes the Course named by the hidden field."""
    course_name = forms.CharField(widget=forms.HiddenInput(), initial='defaultCourse')

    def save(self):
        """Look the course up by name and delete it."""
        course = Course.get_course(name=self.cleaned_data['course_name'])
        course.delete()
# Naming fixed: this class is now EditCourseForm (TODO resolved).
class EditCourseForm(forms.Form):
    # Edits the description / equipment requirements of an existing course.
    course_name = forms.CharField(widget=forms.HiddenInput(), initial='defaultcourse')
    equipment_req = forms.CharField()
    description = forms.CharField()
    def save(self):
        # NOTE(review): unlike EditRoomForm.save(), there is no course.save()
        # call here -- presumably the set_* helpers persist internally; confirm.
        course = Course.get_course(name=self.cleaned_data['course_name'])
        course.set_equipment_req(self.cleaned_data['equipment_req'])
        course.set_description(self.cleaned_data['description'])
class AddSectionTypeForm(forms.Form):
    # Creates a new SectionType with the submitted name.
    section_type_name = forms.CharField()
    def save(self):
        SectionType.create(name=self.cleaned_data['section_type_name'])
# Custom ModelChoiceField for faculty full names
class FacultyModelChoiceField(ModelChoiceField):
    """ModelChoiceField that labels faculty options with their full name."""

    def label_from_instance(self, obj):
        """Render '<first> <last>' as the dropdown label."""
        return " ".join((obj.user.first_name, obj.user.last_name))
class AddSectionForm(forms.Form):
academic_term = forms.ModelChoiceField(label='Term', queryset=Schedule.objects.values_list('academic_term', flat=True), empty_label=" ")
course = forms.ModelChoiceField(label='Course', queryset=Course.objects.values_list('name', flat=True), empty_label=" ")
start_time = forms.TimeField(label='Start Time', input_formats=('%I:%M %p'))
end_time = forms.TimeField(label='End Time', input_formats=('%I:%M %p'))
days = forms.CharField(label='Days')
days = forms.ChoiceField(label='Days', choices=[('MWF', 'MWF'), ('TR', 'TR')])
faculty = FacultyModelChoiceField(label='Faculty', | queryset=CUser.objects.filter(user_type='faculty'))
room = forms.Model | ChoiceField(label='Room', queryset=Room.objects.values_list('name', flat=True), empty_label=" ")
capacity = forms.IntegerField()
section_type = forms.ModelChoiceField(label='Section Type', queryset=SectionType.objects.values_list('name', flat=True), empty_label=" ")
def save(self):
section = Section.create (schedule = Schedule.objects.get(academic_term=self.cleaned_data['academic_term']),
course = Course.objects.get(course=self.cleaned_data['course']),
start_time = self.cl |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import time
from odoo.addons.account.tests.account_test_classes import AccountingTestCase
from odoo.exceptions import ValidationError
class ISRTest(AccountingTestCase):
    """Tests Swiss ISR (payment-slip) reference handling and report generation."""

    def create_invoice(self, currency_to_use='base.CHF'):
        """ Generates a test invoice """
        account_receivable = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_receivable').id)], limit=1)
        currency = self.env.ref(currency_to_use)
        partner_agrolait = self.env.ref("base.res_partner_2")
        product = self.env.ref("product.product_product_4")
        account_revenue = self.env['account.account'].search([('user_type_id', '=', self.env.ref('account.data_account_type_revenue').id)], limit=1)
        invoice = self.env['account.invoice'].create({
            'partner_id': partner_agrolait.id,
            'reference_type': 'none',
            'currency_id': currency.id,
            'name': 'invoice to client',
            'account_id': account_receivable.id,
            'type': 'out_invoice',
            'date_invoice': time.strftime('%Y') + '-12-22',
        })
        self.env['account.invoice.line'].create({
            'product_id': product.id,
            'quantity': 1,
            'price_unit': 42,
            'invoice_id': invoice.id,
            'name': 'something',
            'account_id': account_revenue.id,
        })
        invoice.action_invoice_open()
        return invoice

    def create_account(self, number):
        """ Generates a test res.partner.bank. """
        return self.env['res.partner.bank'].create({
            'acc_number': number
        })

    def print_isr(self, invoice):
        # True when ISR printing succeeds; False when Odoo refuses because
        # required data is missing (raises ValidationError).
        try:
            invoice.isr_print()
            return True
        except ValidationError:
            return False

    def isr_not_generated(self, invoice):
        """ Prints the given invoice and tests that no ISR generation is triggered. """
        self.assertFalse(self.print_isr(invoice), 'No ISR should be generated for this invoice')

    def isr_generated(self, invoice):
        """ Prints the given invoice and tests that an ISR generation is triggered. """
        self.assertTrue(self.print_isr(invoice), 'An ISR should have been generated')

    def test_l10n_ch_postals(self):
        #An account whose number is set to a valid postal number becomes a 'postal'
        #account and sets its postal reference field.
        account_test_postal_ok = self.create_account('010391391')
        self.assertEqual(account_test_postal_ok.acc_type, 'postal', "A valid postal number in acc_number should set its type to 'postal'")
        self.assertEqual(account_test_postal_ok.l10n_ch_postal, '010391391', "A postal account should have a postal reference identical to its account number")
        #An account whose number is set to a non-postal value should not get the
        #'postal' type
        account_test_postal_wrong = self.create_account('010391394')
        self.assertNotEqual(account_test_postal_wrong.acc_type, 'postal', "A non-postal account cannot be of type 'postal'")
        #A swiss IBAN account contains a postal reference
        account_test_iban_ok = self.create_account('CH6309000000250097798')
        self.assertEqual(account_test_iban_ok.acc_type, 'iban', "The IBAN must be valid")
        self.assertEqual(account_test_iban_ok.l10n_ch_postal, '000250097798', "A valid swiss IBAN should set the postal reference")
        #A non-swiss IBAN must not allow the computation of a postal reference
        account_test_iban_wrong = self.create_account('GR1601101250000000012300695')
        self.assertEqual(account_test_iban_wrong.acc_type, 'iban', "The IBAN must be valid")
        # BUG FIX: message was copy-pasted from the swiss-IBAN assertion above
        # and contradicted the assertFalse check.
        self.assertFalse(account_test_iban_wrong.l10n_ch_postal, "A non-swiss IBAN should not set the postal reference")

    def test_isr(self):
        #Let us test the generation of an ISR for an invoice, first by showing an
        #ISR report is only generated when Odoo has all the data it needs.
        invoice_1 = self.create_invoice('base.CHF')
        self.isr_not_generated(invoice_1)
        #Now we add an account for payment to our invoice, but still cannot generate the ISR
        test_account = self.create_account('250097798')
        invoice_1.partner_bank_id = test_account
        self.isr_not_generated(invoice_1)
        #Finally, we add bank coordinates to our account. The ISR should now be available to generate
        test_bank = self.env['res.bank'].create({
            'name':'Money Drop',
            'l10n_ch_postal_chf':'010391391'
        })
        test_account.bank_id = test_bank
        self.isr_generated(invoice_1)
        #Now, let us show that, with the same data, an invoice in euros does not generate any ISR (because the bank does not have any EUR postal reference)
        invoice_2 = self.create_invoice('base.EUR')
        invoice_2.partner_bank_id = test_account
        self.isr_not_generated(invoice_2)
|
from os import makedirs
from os.path import join
from posix import listdir
from django.conf import settings
from django.core.management.base import BaseCommand
from libavwrapper.avconv import Input, Output, AVConv
from libavwrapper.codec import AudioCodec, NO_VIDEO
from 匯入.族語辭典 import 代碼對應
class Command(BaseCommand):
    """Management command: convert dictionary mp3 recordings to 16 kHz mono wav."""
    def add_arguments(self, parser):
        # Single positional argument: the indigenous language to process.
        parser.add_argument(
            '語言',
            type=str,
            help='選擇的族語'
        )
    def handle(self, *args, **參數):
        # TODO: check whether avconv is installed (original: 檢查avconv有裝無)
        代碼 = 代碼對應[參數['語言']]
        語料目錄 = join(settings.BASE_DIR, '語料', '族語辭典', 代碼)
        目標目錄 = join(settings.BASE_DIR, '語料', '族語辭典wav', 代碼)
        makedirs(目標目錄, exist_ok=True)
        for 檔名 in sorted(listdir(語料目錄)):
            if 檔名.endswith('.mp3'):
                來源 = join(語料目錄, 檔名)
                目標 = join(目標目錄, 檔名[:-4] + '.wav')
                # Target audio format: 16-bit PCM, mono, 16 kHz.
                目標聲音格式 = AudioCodec('pcm_s16le')
                目標聲音格式.channels(1)
                目標聲音格式.frequence(16000)
                原始檔案 = Input(來源)
                網頁檔案 = Output(目標).overwrite()
                指令 = AVConv('avconv', 原始檔案, 目標聲音格式, NO_VIDEO, 網頁檔案)
                程序 = 指令.run()
                程序.wait()
|
"""Tests for distutils.command.check."""
import os
import textwrap
import unittest
from test.support import run_unittest
from distutils.command.check import check, HAS_DOCUTILS
from distutils.tests import support
from distutils.errors import DistutilsSetupError
try:
import pygments
except ImportError:
pygments = None
HERE = os.path.dirname(__file__)
class CheckTestCase(support.LoggingSilencer,
                    support.TempdirManager,
                    unittest.TestCase):
    """Exercises the distutils ``check`` command (metadata and reST checks)."""

    def _run(self, metadata=None, cwd=None, **options):
        """Configure and run a ``check`` command; return it for inspection.

        BUG FIX: the working-directory switch is now wrapped in try/finally
        so the original cwd is restored even when ``cmd.run()`` raises (as in
        the assertRaises-based tests), instead of leaking the chdir.
        """
        if metadata is None:
            metadata = {}
        if cwd is not None:
            old_dir = os.getcwd()
            os.chdir(cwd)
        try:
            pkg_info, dist = self.create_dist(**metadata)
            cmd = check(dist)
            cmd.initialize_options()
            for name, value in options.items():
                setattr(cmd, name, value)
            cmd.ensure_finalized()
            cmd.run()
        finally:
            if cwd is not None:
                os.chdir(old_dir)
        return cmd

    def test_check_metadata(self):
        # let's run the command with no metadata at all
        # by default, check is checking the metadata
        # should have some warnings
        cmd = self._run()
        self.assertEqual(cmd._warnings, 2)

        # now let's add the required fields
        # and run it again, to make sure we don't get
        # any warning anymore
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx'}
        cmd = self._run(metadata)
        self.assertEqual(cmd._warnings, 0)

        # now with the strict mode, we should
        # get an error if there are missing metadata
        self.assertRaises(DistutilsSetupError, self._run, {}, **{'strict': 1})

        # and of course, no error when all metadata are present
        cmd = self._run(metadata, strict=1)
        self.assertEqual(cmd._warnings, 0)

        # now a test with non-ASCII characters
        metadata = {'url': 'xxx', 'author': '\u00c9ric',
                    'author_email': 'xxx', 'name': 'xxx',
                    'version': 'xxx',
                    'description': 'Something about esszet \u00df',
                    'long_description': 'More things about esszet \u00df'}
        cmd = self._run(metadata)
        self.assertEqual(cmd._warnings, 0)

    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_document(self):
        pkg_info, dist = self.create_dist()
        cmd = check(dist)

        # let's see if it detects broken rest
        broken_rest = 'title\n===\n\ntest'
        msgs = cmd._check_rst_data(broken_rest)
        self.assertEqual(len(msgs), 1)

        # and non-broken rest
        rest = 'title\n=====\n\ntest'
        msgs = cmd._check_rst_data(rest)
        self.assertEqual(len(msgs), 0)

    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_restructuredtext(self):
        # let's see if it detects broken rest in long_description
        broken_rest = 'title\n===\n\ntest'
        pkg_info, dist = self.create_dist(long_description=broken_rest)
        cmd = check(dist)
        cmd.check_restructuredtext()
        self.assertEqual(cmd._warnings, 1)

        # let's see if we have an error with strict=1
        metadata = {'url': 'xxx', 'author': 'xxx',
                    'author_email': 'xxx',
                    'name': 'xxx', 'version': 'xxx',
                    'long_description': broken_rest}
        self.assertRaises(DistutilsSetupError, self._run, metadata,
                          **{'strict': 1, 'restructuredtext': 1})

        # and non-broken rest, including a non-ASCII character to test #12114
        metadata['long_description'] = 'title\n=====\n\ntest \u00df'
        cmd = self._run(metadata, strict=1, restructuredtext=1)
        self.assertEqual(cmd._warnings, 0)

        # check that includes work to test #31292
        metadata['long_description'] = 'title\n=====\n\n.. include:: includetest.rst'
        cmd = self._run(metadata, cwd=HERE, strict=1, restructuredtext=1)
        self.assertEqual(cmd._warnings, 0)

    @unittest.skipUnless(HAS_DOCUTILS, "won't test without docutils")
    def test_check_restructuredtext_with_syntax_highlight(self):
        # Don't fail if there is a `code` or `code-block` directive
        example_rst_docs = []
        example_rst_docs.append(textwrap.dedent("""\
            Here's some code:

            .. code:: python

                def foo():
                    pass
            """))
        example_rst_docs.append(textwrap.dedent("""\
            Here's some code:

            .. code-block:: python

                def foo():
                    pass
            """))
        for rest_with_code in example_rst_docs:
            pkg_info, dist = self.create_dist(long_description=rest_with_code)
            cmd = check(dist)
            cmd.check_restructuredtext()
            msgs = cmd._check_rst_data(rest_with_code)
            if pygments is not None:
                self.assertEqual(len(msgs), 0)
            else:
                self.assertEqual(len(msgs), 1)
                self.assertEqual(
                    str(msgs[0][1]),
                    'Cannot analyze code. Pygments package not found.'
                )

    def test_check_all(self):
        metadata = {'url': 'xxx', 'author': 'xxx'}
        self.assertRaises(DistutilsSetupError, self._run,
                          {}, **{'strict': 1,
                                 'restructuredtext': 1})
def test_suite():
    """Return the suite of check-command tests.

    Uses unittest.TestLoader because unittest.makeSuite has been deprecated
    since Python 3.11 and removed in 3.13.
    """
    return unittest.TestLoader().loadTestsFromTestCase(CheckTestCase)

if __name__ == "__main__":
    run_unittest(test_suite())
|
# coding=utf-8
"""This module, code_section.py, is an abstraction for code sections. Needed for ordering code chunks."""
class CodeSection(object):
    """A named, ordered container of code chunks from one source file."""

    def __init__(self, section_name):
        """Create an empty section identified by *section_name*."""
        self._section_name = section_name
        self._code_chunks = []

    def add_code_chunk_at_start(self, code_chunk):
        """Prepend *code_chunk* so it precedes every existing chunk."""
        self._code_chunks.insert(0, code_chunk)

    def add_code_chunk(self, code_chunk):
        """Append *code_chunk* after every existing chunk."""
        self._code_chunks.append(code_chunk)

    def get_all_code_chunks(self):
        """Return this section's chunks, in order."""
        return self._code_chunks

    @property
    def empty(self) -> bool:
        """True when the section holds no chunks."""
        return not self._code_chunks

    @property
    def name(self) -> str:
        """The section's name."""
        return self._section_name
|
on:
    def forwards(self, orm):
        """South forward migration: create the corpus tables and emit
        the post-create signals South uses for bookkeeping."""
        # Adding model 'TaggedSentence'
        db.create_table('tagged_sentences', (
            ('text', orm['corpus.TaggedSentence:text']),
            ('language', orm['corpus.TaggedSentence:language']),
            ('sentence', orm['corpus.TaggedSentence:sentence']),
        ))
        db.send_create_signal('corpus', ['TaggedSentence'])
        # Adding model 'Language'
        db.create_table('corpus_language', (
            ('id', orm['corpus.Language:id']),
            ('name', orm['corpus.Language:name']),
            ('sentence_count', orm['corpus.Language:sentence_count']),
        ))
        db.send_create_signal('corpus', ['Language'])
        # Adding model 'DependencyParse'
        db.create_table('dependency_parses', (
            ('id', orm['corpus.DependencyParse:id']),
            ('sentence', orm['corpus.DependencyParse:sentence']),
            ('linktype', orm['corpus.DependencyParse:linktype']),
            ('word1', orm['corpus.DependencyParse:word1']),
            ('word2', orm['corpus.DependencyParse:word2']),
            ('index1', orm['corpus.DependencyParse:index1']),
            ('index2', orm['corpus.DependencyParse:index2']),
        ))
        db.send_create_signal('corpus', ['DependencyParse'])
        # Adding model 'Sentence'
        db.create_table('sentences', (
            ('id', orm['corpus.Sentence:id']),
            ('text', orm['corpus.Sentence:text']),
            ('creator', orm['corpus.Sentence:creator']),
            ('created_on', orm['corpus.Sentence:created_on']),
            ('language', orm['corpus.Sentence:language']),
            ('activity', orm['corpus.Sentence:activity']),
            ('score', orm['corpus.Sentence:score']),
        ))
        db.send_create_signal('corpus', ['Sentence'])
    def backwards(self, orm):
        """South reverse migration: drop the tables created by forwards()."""
        # Deleting model 'TaggedSentence'
        db.delete_table('tagged_sentences')
        # Deleting model 'Language'
        db.delete_table('corpus_language')
        # Deleting model 'DependencyParse'
        db.delete_table('dependency_parses')
        # Deleting model 'Sentence'
        db.delete_table('sentences')
models = {
'auth.group': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
} | ,
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),) | "},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'corpus.dependencyparse': {
'Meta': {'db_table': "'dependency_parses'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index1': ('django.db.models.fields.IntegerField', [], {}),
'index2': ('django.db.models.fields.IntegerField', [], {}),
'linktype': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'sentence': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['corpus.Sentence']"}),
'word1': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'word2': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'corpus.language': {
'id': ('django.db.models.fields.CharField', [], {'max_length': '16', 'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'sentence_count': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'corpus.sentence': {
'Meta': {'db_table': "'sentences'"},
'activity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['events.Activity']"}),
'created_on': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['corpus.Language']"}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {}),
'votes': ('django.contrib.contenttypes.generic.GenericRelation', [], {'to': "orm['voting.Vote']"})
},
'corpus.taggedsentence': {
'Meta': {'db_table': "'tagged_sentences'"},
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['corpus.Language']"}),
'sentence': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['corpus.Sentence']", 'primary_key': 'True'}),
'text': ('django.db.models.fields.TextField', [], {})
},
'events.activity': {
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.TextField', [], {})
},
'voting.vote': {
'Meta': {'unique_together': "(('user', 'content_type', 'object_id'),)", 'db_table': "'votes'"},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'vote': ('django.db.models.field |
he above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Utility scripts for selenium.
A collection of utility scripts for selenium test cases to use.
"""
import os
import re
import time
import unittest
import base64
import gflags
import selenium_constants
FLAGS = gflags.FLAGS
# Recognized test-size suffixes, all lowercase.
SUFFIXES = ["small", "medium", "large"]


def IsValidTestType(test_type):
    """Returns True if test_type is a "small", "medium" or "large"."""
    normalized = test_type.lower()
    return any(normalized == suffix for suffix in SUFFIXES)
def | IsValidSuffix(name):
"""Returns True if name ends in a valid test type."""
name = name.lower()
for suffix in SUFFIXES:
i | f name.endswith(suffix):
return True
return False
def ScreenshotNameFromTestName(name):
    """Derive a screenshot file name from a test method name.

    Strips the size suffix and any Test* prefix, then normalizes
    separators so the result is filesystem-friendly.
    """
    name = StripTestTypeSuffix(name)
    if name.startswith("Test"):
        # Make sure these are in order.
        prefixes = ["TestStress", "TestSample", "Test"]
        for prefix in prefixes:
            if name.startswith(prefix):
                name = name[len(prefix):]
                break
        # Lowercase the name only for custom test methods.
        name = name.lower()
    # '_' becomes '-' and path separators become '_' for all names.
    name = name.replace("_", "-")
    name = name.replace("/", "_")
    return name
def StripTestTypeSuffix(name):
    """Removes the suffix from name if it is a valid test type."""
    lowered = name.lower()
    for suffix in SUFFIXES:
        if lowered.endswith(suffix):
            return name[:len(name) - len(suffix)]
    return name
def GetArgument(string):
    """Returns the value inside the first set of parentheses in a string.

    Args:
      string: String in the format "identifier(args)"

    Returns:
      args from string passed in. None if there were no parentheses.
    """
    # BUG FIX: pattern is now a raw string; "\w" and "\(" in a normal string
    # are invalid escape sequences (DeprecationWarning on modern Python).
    match = re.match(r"\w+\(([^)]+)\)", string)
    if match:
        return match.group(1)
    return None
def TakeScreenShot(session, browser, client, filename):
    """Takes a screenshot of the o3d display buffer.

    This function is the preferred way to capture an image of the plugin.

    Uses gflags:
      If gflags.FLAGS.screenshots is False then screen shots will not be taken.
      gflags.FLAGS.screenshotsdir must be set to the path to save screenshots in.

    Args:
      session: Selenium session.
      browser: Name of the browser running the test.
      client: String that in javascript will return the o3d client.
      filename: Name of screenshot.

    Returns:
      success: True on success, False on failure.
    """
    # If screenshots enabled
    if gflags.FLAGS.screenshots:
        # Screenshots are saved relative to the current working directory.
        full_path = os.path.join(os.getcwd(),
                                 FLAGS.screenshotsdir,
                                 filename)
        return TakeScreenShotAtPath(session,
                                    browser,
                                    client,
                                    full_path)
    else:
        # Screenshots not enabled, return true (success).
        return True
def TakeScreenShotAtPath(session,
                         browser,
                         client,
                         filename):
    """Takes a screenshot of the o3d display buffer.

    This should be used by tests that need to specify exactly where to save the
    image or don't want to use gflags.

    Args:
      session: Selenium session.
      browser: Name of the browser running the test.
      client: String that in javascript will return the o3d client.
      filename: Full path to screenshot to be saved.

    Returns:
      success: True on success, False on failure.
    """
    session.window_focus()
    # Resize window, and client area if needed.
    session.run_script(
        "(function() {\n"
        " var needResize = false;\n"
        " var divs = window.document.getElementsByTagName('div');\n"
        " for (var ii = 0; ii < divs.length; ++ii) {\n"
        " var div = divs[ii];\n"
        " if (div.id && div.id == 'o3d') {\n"
        " var widthSpec = div.style.width;\n"
        " if (widthSpec.indexOf('%') >= 0) {\n"
        " div.style.width = '800px';\n"
        " div.style.height = '600px';\n"
        " needResize = true;\n"
        " break;\n"
        " }\n"
        " }\n"
        " }\n"
        " window.o3d_seleniumNeedResize = needResize;\n"
        "} ());\n")
    need_client_resize = (
        session.get_eval("window.o3d_seleniumNeedResize") == "true")
    if need_client_resize:
        session.wait_for_condition(
            "window.%s.width == 800 && window.%s.height == 600" % (client, client),
            20000)
    else:
        session.run_script("window.resizeTo(%d, %d)" %
                           (selenium_constants.RESIZE_WIDTH,
                            selenium_constants.RESIZE_HEIGHT))
    # Execute screenshot capture code
    # Replace all backslashes with forward slashes so it is parsed correctly
    # by Javascript
    full_path = filename.replace("\\", "/")
    # Attempt to take a screenshot of the display buffer
    eval_string = ("%s.toDataURL()" % client)
    # Set Post render call back to take screenshot
    script = ["window.g_selenium_post_render = false;",
              "window.g_selenium_save_screen_result = false;",
              "var frameCount = 0;",
              "%s.setPostRenderCallback(function() {" % client,
              " ++frameCount;",
              " if (frameCount >= 3) {",
              " %s.clearPostRenderCallback();" % client,
              " window.g_selenium_save_screen_result = %s;" % eval_string,
              " window.g_selenium_post_render = true;",
              " } else {",
              " %s.render()" % client,
              " }",
              "})",
              "%s.render()" % client]
    session.run_script("\n".join(script))
    # Wait for screenshot to be taken.
    session.wait_for_condition("window.g_selenium_post_render", 20000)
    # Get result
    data_url = session.get_eval("window.g_selenium_save_screen_result")
    expected_header = "data:image/png;base64,"
    if data_url.startswith(expected_header):
        png = base64.b64decode(data_url[len(expected_header):])
        # BUG FIX: use a context manager (and stop shadowing the 'file'
        # builtin) so the handle is closed even if write() raises.
        with open(full_path + ".png", 'wb') as png_file:
            png_file.write(png)
        return True
    return False
class SeleniumTestCase(unittest.TestCase):
"""Wrapper for TestCase for selenium."""
def __init__(self, name, browser, path_to_html, test_type=None,
sample_path=None, options=None):
"""Constructor for SampleTests.
Args:
name: Name of unit test.
session: Selenium session.
browser: Name of browser.
path_to_html: path to html from server root
test_type: Type of test ("small", "medium", "large")
sample_path: Path to test.
load_timeout: Time to wait for page to load (ms).
run_timeout: Time to wait for test to run.
options: list of option strings.
"""
unittest.TestCase.__init__(self, name)
self.name = name
self.session = None
self.browser = browser
self.test_type = test_type
self.sample_path = sample_path
self.path_to_html = path_to_html
self.screenshots = []
self.load_timeout = 10000
self.run_timeout = None
self.client = "g_client"
# parse options
for option in options:
if option.startswith("screenshots"):
for i in range(int(GetArgument(option))):
self.screenshots.append("27.5")
elif option.startswith("screenshot"): |
#!/usr/bin/env python
#
# C | opyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
from adb_profile_chrome import main
|
# Script entry point: run adb_profile_chrome's CLI and propagate its
# return value as the process exit status.
if __name__ == '__main__':
    sys.exit(main.main())
|
# Script executed by jython
# Can import any Java package
from org.cs | studio.display.builder.runtime.script import PVUtil
# Can also import some python code that's available under Jython
import sys, time
# `pvs` (the widget's input PVs) and `widget` are not defined in this file;
# presumably they are injected into the script scope by the Display Builder
# runtime that executes this Jython script.
trigger = PVUtil.getInt(pvs[0])
if trigger:
    # Show the Jython/Python version and the invocation timestamp.
    info = "%s,\ninvoked at %s" % (sys.version, time.strftime("%Y-%m-%d %H:%M:%S"))
    widget.setPropertyValue("text", info)
|
import pytest
from .addons import using_networkx
from .utils import *
import math
import numpy as np
import qcelemental as qcel
import psi4
from psi4.driver import qcdb
# Apply the "quick" pytest marker to every test in this module.
pytestmark = pytest.mark.quick
def hide_test_xtpl_fn_fn_error():
    """Passing a callable as ``scf_scheme`` must raise UpgradeHelper."""
    psi4.geometry('He')
    with pytest.raises(psi4.UpgradeHelper) as exc_info:
        psi4.energy('cbs', scf_basis='cc-pvdz', scf_scheme=psi4.driver_cbs.xtpl_highest_1)
    message = str(exc_info.value)
    assert 'Replace extrapolation function with function name' in message
def hide_test_xtpl_cbs_fn_error():
    """Passing the cbs callable itself (rather than the string 'cbs') must
    raise UpgradeHelper pointing at the new spelling."""
    psi4.geometry('He')
    with pytest.raises(psi4.UpgradeHelper) as exc_info:
        psi4.energy(psi4.cbs, scf_basis='cc-pvdz')
    message = str(exc_info.value)
    assert 'Replace cbs or complete_basis_set function with cbs string' in message
@pytest.mark.parametrize("inp,out", [
    ((2, 'C2V'), 2),
    (('A2', 'c2v'), 2),
    (('2', 'C2V'), 2),
])
def test_parse_cotton_irreps(inp, out):
    """parse_cotton_irreps accepts an int index, an irrep label, or a
    stringified index, case-insensitively in the point group."""
    assert psi4.driver.driver_util.parse_cotton_irreps(*inp) == out
@pytest.mark.parametrize("inp", [
    ((5, 'cs')),
    (('5', 'cs')),
    ((0, 'cs')),
    (('a2', 'cs')),
])
def test_parse_cotton_irreps_error(inp):
    """Out-of-range indices and unknown labels must fail validation."""
    with pytest.raises(psi4.ValidationError) as exc_info:
        psi4.driver.driver_util.parse_cotton_irreps(*inp)
    assert 'not valid for point group' in str(exc_info.value)
# <<< TODO Deprecated! Delete in Psi4 v1.5 >>>
@using_networkx
def test_deprecated_qcdb_align_b787():
    """Calling B787 through the deprecated ``qcdb.align`` shim must still
    compute the correct RMSD while emitting a FutureWarning."""
    # Linear CO2-like molecule, 1.0 Ang bonds, atoms ordered O-C-O.
    soco10 = """
O 1.0 0.0 0.0
C 0.0 0.0 0.0
O -1.0 0.0 0.0
units ang
"""
    # Same molecule with 1.2 Ang bonds, shifted along y, atom order permuted.
    sooc12 = """
O 1.2 4.0 0.0
O -1.2 4.0 0.0
C 0.0 4.0 0.0
units ang
"""
    # Best alignment leaves a 0.2 Ang residual on two of the three atoms.
    ref_rmsd = math.sqrt(2. * 0.2 * 0.2 / 3.)  # RMSD always in Angstroms
    oco10 = qcel.molparse.from_string(soco10)
    oco12 = qcel.molparse.from_string(sooc12)

    # Geometries were parsed in Angstroms; convert to Bohr for B787.
    oco10_geom_au = oco10['qm']['geom'].reshape((-1, 3)) / qcel.constants.bohr2angstroms
    oco12_geom_au = oco12['qm']['geom'].reshape((-1, 3)) / qcel.constants.bohr2angstroms
    with pytest.warns(FutureWarning) as err:
        rmsd, mill = qcdb.align.B787(
            oco10_geom_au, oco12_geom_au, np.array(['O', 'C', 'O']), np.array(['O', 'O', 'C']), verbose=4, do_plot=False)
    assert compare_values(ref_rmsd, rmsd, 6, 'known rmsd B787')
def test_deprecated_qcdb_align_scramble():
    """The deprecated qcdb.align.compute_scramble shim still works (here
    with every perturbation disabled) but must warn FutureWarning."""
    identity_kwargs = dict(
        do_resort=False, do_shift=False, do_rotate=False,
        deflection=1.0, do_mirror=False)
    with pytest.warns(FutureWarning) as err:
        mill = qcdb.align.compute_scramble(4, **identity_kwargs)
    # With no perturbations the atom map is the identity permutation.
    assert compare_arrays([0, 1, 2, 3], mill.atommap, 4, 'atommap')
# <<< TODO Deprecated! Delete when the error messages are removed. >>>
def test_deprecated_dcft_calls():
    """Every driver entry point and the option setters must reject the old
    'dcft' spelling with an upgrade hint."""
    psi4.geometry('He')
    err_substr = "All instances of 'dcft' should be replaced with 'dct'."
    for driver_call in (psi4.energy, psi4.optimize, psi4.gradient, psi4.hessian, psi4.frequencies):
        with pytest.raises(psi4.UpgradeHelper) as e:
            driver_call('dcft', basis='cc-pvdz')
        assert err_substr in str(e.value)
    # The errors trapped below are C-side, so they're nameless, Py-side.
    for module, options in (('dcft', {'e_convergence': 9}),
                            ('dct', {'dcft_functional': 'odc-06'})):
        with pytest.raises(Exception) as e:
            psi4.set_module_options(module, options)
        assert err_substr in str(e.value)
|
import csv
import re
from io import TextIOWrapper
from django.conf import settings
from django.core.cache import cache
from django.utils.termcolors import colorize
# Import clog if we're in debug otherwise make it a noop
# (stubbed out in production so call sites can call clog() unconditionally).
if settings.DEBUG:
    from clog.clog import clog
else:
    def clog(*args, **kwargs):
        # No-op stand-in matching clog's call signature.
        pass
def pop_first(data, key):
    """Pop `key` from the dict `data`; if the popped value is a list,
    return its first element, otherwise return the value itself.

    Handy for API payloads where `request.data.pop(whatever)` sometimes
    gives a list and other times a bare object.
    """
    value = data.pop(key)
    return value[0] if isinstance(value, list) else value
def num_user_selections(obj):
    """Return a count of the given object's UserXXXX instances (where XXXX
    is one of our content model names), i.e. how many users selected it.

    Valid for Category, Goal, Action instances; anything else raises
    ValueError.
    """
    model = obj._meta.model_name
    if model not in ('category', 'goal', 'action'):
        raise ValueError("{0} is not a supported object type".format(model))
    related_manager = getattr(obj, "user{0}_set".format(model))
    return related_manager.count()
# ------------------------------------------
#
# Helper functions for cleaning text content
#
# ------------------------------------------
def clean_title(text):
    """Titles: collapse runs of whitespace into single spaces, strip, and
    drop a single trailing period."""
    if not text:
        return text
    collapsed = re.sub(r'\s+', ' ', text).strip()
    return collapsed[:-1] if collapsed.endswith(".") else collapsed
def clean_notification(text):
    """Notification text: collapse all whitespace, strip, include an ending
    period (if not a ? or a !).

    Returns falsy input unchanged; whitespace-only input becomes ''.
    """
    if text:
        text = re.sub(r'\s+', ' ', text).strip()  # collapse whitespace
        # BUG FIX: a whitespace-only input is truthy, but after the strip
        # the string is empty and text[-1] raised IndexError. Guard on the
        # post-strip value before inspecting the last character.
        if text and text[-1] not in ('.', '?', '!'):
            text += "."
    return text
def strip(text):
    """Conditionally call text.strip() when the input text is truthy;
    falsy values (None, '') pass through unchanged."""
    return text.strip() if text else text
def read_uploaded_csv(uploaded_file, encoding='utf-8', errors='ignore'):
    """Generator: wrap an uploaded file's byte stream (such as an
    InMemoryUploadedFile.file) as text and parse it as CSV, yielding each
    non-empty row as a list of strings.

    NOTES:
    1. This makes a big assumption about utf-8 encodings, and the errors
       param means we potentially lose data!
    2. InMemoryUploadedFileSee: http://stackoverflow.com/a/16243182/182778
    """
    text_stream = TextIOWrapper(
        uploaded_file.file, encoding=encoding, newline='', errors=errors)
    yield from (row for row in csv.reader(text_stream) if any(row))
def delete_content(prefix):
    """Delete content whose title/name starts with the given prefix."""
    from goals.models import Action, Category, Goal, Trigger
    print("Deleting content that startswith='{}'".format(prefix))
    # Keep the original deletion order: Actions, Triggers, Goals, Categories.
    # Note Trigger filters on `name`, the others on `title`.
    batches = [
        (Action, {'title__startswith': prefix}, 'Actions'),
        (Trigger, {'name__startswith': prefix}, 'Triggers'),
        (Goal, {'title__startswith': prefix}, 'Goals'),
        (Category, {'title__startswith': prefix}, 'Categories'),
    ]
    for model, lookup, label in batches:
        queryset = model.objects.filter(**lookup)
        print("Deleting {} {}...".format(queryset.count(), label))
        queryset.delete()
    print("...done.")
|
from font import font
class zschemaname( font ):
    """
    Displays a header name for a Z Schema. It may contain text, images,
    equations, etc... but the width of it should be kept to a minimum so
    it isn't wider than the containing Z Schema box. See
    <a href="zschema.html"><zschema></a> for proper usage.
    """
    def __init__( self, *args ):
        """
        Initiate the container, contents, and properties.
        -*args, arguments for the font constructor.
        """
        # Direct call replaces the legacy apply() builtin (removed in
        # Python 3); behavior is identical.
        font.__init__( self, *args )
        self.setColorDefined( self.hasProperty("color") )
    def render( self, app, x, y ):
        """
        -app, SlideApplication object
        -x, x coordinate to start drawing at
        -y, y coordinate to start drawing at
        Returns x, y coordinates where the rendering left off.
        """
        #
        # Don't draw anything it this isn't a direct child of a
        # <zschema> tag in the XML document.
        #
        from zschema import zschema
        if not isinstance(self.getContainer(), zschema):
            return x, y
        # Inherit the border color of the enclosing schema when no explicit
        # 'color' property was configured on this element.
        if not self.colorDefined():
            borderQColor = self.getContainer().getBorderColor()
            self.setProperty( "color", str(borderQColor.name()) )
        container = self.getContainer()
        # Indent 16px past the schema's own margin/cellspacing on the left,
        # and mirror the computation for the right margin.
        self.setProperty( "marginleft",
                          container.getProperty("marginleft") + \
                          container.getProperty("cellspacing") + \
                          16 )
        self.setProperty( "marginright",
                          app.getWidth() - \
                          (x + container.getWidth()) + \
                          self.getProperty("cellpadding") + \
                          16)
        x = self.getProperty( "marginleft" )
        return font.render( self, app, x, y )
    def move( self, x, y ):
        """
        Shift the rendering origin right by the 16px schema inset, then
        delegate to the font base class.
        """
        x = x + 16
        font.move( self, x, y )
    def setColorDefined( self, colorDefined ):
        """
        Record whether an explicit 'color' property was supplied.
        """
        self.__colorDefined = colorDefined
    def colorDefined( self ):
        """
        Return whether an explicit 'color' property was supplied;
        defaults to False when never set.
        """
        try:
            return self.__colorDefined
        except AttributeError:
            # BUG FIX: the original returned the undefined name `false`,
            # which raises NameError; Python's boolean literal is `False`.
            self.__colorDefined = False
            return False
    def getHtml( self ):
        """
        Get the HTML associated with this object.
        Returns a list of html strings, with each entry being a line
        in a html file.
        """
        return font.getHtml( self )
|
import unittest
RESOURCE_NAME = 'Test_Resource'
class | BaseEndpointTest(unittest.TestCase):
def setUp(self):
self.endpoint = None
self.resources = {}
self.template = {
'Resources': self.resources
| }
def test_resource_name(self):
if self.endpoint:
resource_name = self.endpoint.resource_name(RESOURCE_NAME)
self.assertEquals(self.topic_resource(), resource_name)
def subscriptions(self):
resource_name = self.topic_resource()
return self.resources[resource_name]['Properties']['Subscription']
def topic_resource(self):
raise ValueError('Must override topic_resource.')
|
#This file is part of Tryton & Nereid. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
import imp
from flask.config import ConfigAttribute, Config as ConfigBase # noqa
class Config(ConfigBase):
    "Configuration without the root_path"
    def __init__(self, defaults=None):
        # Deliberately skip flask.Config.__init__ (which requires a
        # root_path); behave as a plain dict seeded with `defaults`.
        dict.__init__(self, defaults or {})
    def from_pyfile(self, filename):
        """
        Updates the values in the config from a Python file. This function
        behaves as if the file was imported as module with the
        :meth:`from_object` function.
        :param filename: the filename of the config. This can either be an
                         absolute filename or a filename relative to the
                         root path.
        """
        # NOTE(review): Python 2 only -- relies on execfile() and the
        # `except IOError, e` syntax, both invalid in Python 3.
        d = imp.new_module('config')
        d.__file__ = filename
        try:
            execfile(filename, d.__dict__)
        except IOError, e:
            e.strerror = 'Unable to load configuration file (%s)' % e.strerror
            raise
        self.from_object(d)
|
#!/usr/bin/env python
"""
demos reading HiST camera parameters from XML file
"""
from histutils.hstxmlparse impor | t xmlparam
from a | rgparse import ArgumentParser
if __name__ == "__main__":
p = ArgumentParser()
p.add_argument("fn", help="xml filename to parse")
p = p.parse_args()
params = xmlparam(p.fn)
print(params)
|
from __future__ import print_function
import os
import time
import json
import datetime
import argparse
import requests
from message import Message
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-f','--format', default="protobuf", choices=["protobuf","json"], help="message format")
    parser.add_argument('CONFIG', type=argparse.FileType('r'), help="configuration file")
    parser.add_argument('URL', help="tavis url")
    args = parser.parse_args()
    # The signing secret comes from the JSON configuration file.
    config = json.load(args.CONFIG)
    shared_secret = config.get('shared_secret', 'ultrasafesecret')
    # Build one fixed-payload message stamped with the current Unix time.
    msg = Message(random_payload=False)
    msg.time = int(time.time())
    # POST it with Hamustro's time/signature headers in the chosen format.
    resp = requests.post(args.URL, headers={
        'X-Hamustro-Time': msg.time,
        'X-Hamustro-Signature': msg.signature(shared_secret, args.format),
        'Content-Type': 'application/{}; charset=utf-8'.format(args.format)
    }, data=msg.get_body(args.format))
    print('Response code: {}'.format(resp.status_code))
"""Support for Geofency."""
import logging
from aiohttp import web
import voluptuous as vol
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER
from homeassistant.const import (
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_NAME,
CONF_WEBHOOK_ID,
HTTP_OK,
HTTP_UNPROCESSABLE_ENTITY,
STATE_NOT_HOME,
)
from homeassistant.helpers import config_entry_flow
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.util import slugify
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_MOBILE_BEACONS = "mobile_beacons"
# Optional YAML config: a list of beacon names to treat as mobile beacons.
CONFIG_SCHEMA = vol.Schema(
    {
        vol.Optional(DOMAIN): vol.Schema(
            {
                vol.Optional(CONF_MOBILE_BEACONS, default=[]): vol.All(
                    cv.ensure_list, [cv.string]
                )
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
# Keys of the webhook payload Geofency POSTs to us.
ATTR_ADDRESS = "address"
ATTR_BEACON_ID = "beaconUUID"
ATTR_CURRENT_LATITUDE = "currentLatitude"
ATTR_CURRENT_LONGITUDE = "currentLongitude"
ATTR_DEVICE = "device"
ATTR_ENTRY = "entry"
BEACON_DEV_PREFIX = "beacon"
# Geofency sends entry="1" on zone entry and "0" on exit.
LOCATION_ENTRY = "1"
LOCATION_EXIT = "0"
# Dispatcher signal fired from _set_location (presumably consumed by the
# device_tracker platform of this integration).
TRACKER_UPDATE = f"{DOMAIN}_tracker_update"
def _address(value: str) -> str:
r"""Coerce address by replacing '\n' with ' '."""
return value.replace("\n", " ")
# Validates and normalizes the form data Geofency POSTs to the webhook;
# device/name are slugified and the address has newlines collapsed.
WEBHOOK_SCHEMA = vol.Schema(
    {
        vol.Required(ATTR_ADDRESS): vol.All(cv.string, _address),
        vol.Required(ATTR_DEVICE): vol.All(cv.string, slugify),
        vol.Required(ATTR_ENTRY): vol.Any(LOCATION_ENTRY, LOCATION_EXIT),
        vol.Required(ATTR_LATITUDE): cv.latitude,
        vol.Required(ATTR_LONGITUDE): cv.longitude,
        vol.Required(ATTR_NAME): vol.All(cv.string, slugify),
        vol.Optional(ATTR_CURRENT_LATITUDE): cv.latitude,
        vol.Optional(ATTR_CURRENT_LONGITUDE): cv.longitude,
        vol.Optional(ATTR_BEACON_ID): cv.string,
    },
    extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, hass_config):
    """Set up the Geofency component.

    Stores the slugified mobile-beacon names plus empty device/unsubscribe
    registries in hass.data[DOMAIN].
    """
    conf = hass_config.get(DOMAIN, {})
    beacon_names = conf.get(CONF_MOBILE_BEACONS, [])
    hass.data[DOMAIN] = {
        "beacons": [slugify(name) for name in beacon_names],
        "devices": set(),
        "unsub_device_tracker": {},
    }
    return True
async def handle_webhook(hass, webhook_id, request):
    """Handle incoming webhook from Geofency."""
    try:
        data = WEBHOOK_SCHEMA(dict(await request.post()))
    except vol.MultipleInvalid as error:
        # Reject malformed payloads with 422 and the validation message.
        return web.Response(text=error.error_message, status=HTTP_UNPROCESSABLE_ENTITY)
    # Mobile beacons carry no fixed zone: location name stays None.
    if _is_mobile_beacon(data, hass.data[DOMAIN]["beacons"]):
        return _set_location(hass, data, None)
    location_name = data["name"] if data["entry"] == LOCATION_ENTRY else STATE_NOT_HOME
    # Prefer the device's current coordinates over the zone's when present.
    if ATTR_CURRENT_LATITUDE in data:
        data[ATTR_LATITUDE] = data[ATTR_CURRENT_LATITUDE]
        data[ATTR_LONGITUDE] = data[ATTR_CURRENT_LONGITUDE]
    return _set_location(hass, data, location_name)
def _is_mobile_beacon(data, mobile_beacons):
    """Check if the payload describes a configured mobile beacon."""
    if ATTR_BEACON_ID not in data:
        return False
    return data["name"] in mobile_beacons
def _device_name(data):
    """Return the device-tracker name: 'beacon_<name>' for beacons,
    otherwise the reporting device's own id."""
    if ATTR_BEACON_ID not in data:
        return data["device"]
    return f"{BEACON_DEV_PREFIX}_{data['name']}"
def _set_location(hass, data, location_name):
    """Fire the dispatcher signal that updates the device tracker."""
    device = _device_name(data)
    coordinates = (data[ATTR_LATITUDE], data[ATTR_LONGITUDE])
    async_dispatcher_send(
        hass, TRACKER_UPDATE, device, coordinates, location_name, data)
    return web.Response(text=f"Setting location for {device}", status=HTTP_OK)
async def async_setup_entry(hass, entry):
    """Configure based on config entry."""
    # Register the webhook endpoint and forward setup to device_tracker.
    hass.components.webhook.async_register(
        DOMAIN, "Geofency", entry.data[CONF_WEBHOOK_ID], handle_webhook
    )
    hass.async_create_task(
        hass.config_entries.async_forward_entry_setup(entry, DEVICE_TRACKER)
    )
    return True
async def async_unload_entry(hass, entry):
    """Unload a config entry."""
    hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
    # Remove the stored dispatcher-unsubscribe callback for this entry and
    # invoke it.
    hass.data[DOMAIN]["unsub_device_tracker"].pop(entry.entry_id)()
    await hass.config_entries.async_forward_entry_unload(entry, DEVICE_TRACKER)
    return True
# Entry removal is delegated to the shared webhook flow helper.
# pylint: disable=invalid-name
async_remove_entry = config_entry_flow.webhook_async_remove_entry
|
# NOTE(review): bare dict literal -- apparently a sample comment payload
# kept for reference; as a bare expression it has no effect when executed.
{'comment': {'handle': 'matt@example.com',
             'id': 2603645287324504065,
             'message': 'I think differently now.',
             'resource': '/api/v1/comments/2603645287324504065',
             'url': '/event/jump_to?event_id=2603645287324504065'}}
|
# -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import Warning
import logging
_logger = logging.getLogger(__name__)
class afip_incoterm(models.Model):
    """Catalog of AFIP incoterms: an AFIP code plus a display name."""
    _name = 'afip.incoterm'
    _description = 'Afip Incoterm'
    afip_code = fields.Char(
        'Code', required=True)
    name = fields.Char(
        'Name', required=True)
class afip_point_ | of_sale(models.Model):
_name = 'afip.point_of_sale'
_description = 'Afip Point Of Sale'
prefix = fields.Char(
'Prefix'
)
sufix = fields.Char(
'Sufix'
)
type = fields.Selection([
('manual', 'Manual'),
('preprinted', 'Preprinted'),
('online', 'Online'),
# Agregados por otro modulo
# ('electronic', 'Electronic'),
# ('f | iscal_printer', 'Fiscal Printer'),
],
'Type',
default='manual',
required=True,
)
name = fields.Char(
compute='get_name',
)
number = fields.Integer(
'Number', required=True
)
company_id = fields.Many2one(
'res.company', 'Company', required=True,
default=lambda self: self.env['res.company']._company_default_get(
'afip.point_of_sale')
)
journal_ids = fields.One2many(
'account.journal',
'point_of_sale_id',
'Journals',
)
document_sequence_type = fields.Selection(
[('own_sequence', 'Own Sequence'),
('same_sequence', 'Same Invoice Sequence')],
string='Document Sequence Type',
default='own_sequence',
required=True,
help="Use own sequence or invoice sequence on Debit and Credit Notes?"
)
journal_document_class_ids = fields.One2many(
'account.journal.afip_document_class',
compute='get_journal_document_class_ids',
string='Documents Classes',
)
@api.one
@api.depends('type', 'sufix', 'prefix', 'number')
def get_name(self):
# TODO mejorar esto y que tome el lable traducido del selection
if self.type == 'manual':
name = 'Manual'
elif self.type == 'preprinted':
name = 'Preimpresa'
elif self.type == 'online':
name = 'Online'
elif self.type == 'electronic':
name = 'Electronica'
if self.prefix:
name = '%s %s' % (self.prefix, name)
if self.sufix:
name = '%s %s' % (name, self.sufix)
name = '%04d - %s' % (self.number, name)
self.name = name
@api.one
@api.depends('journal_ids.journal_document_class_ids')
def get_journal_document_class_ids(self):
journal_document_class_ids = self.env[
'account.journal.afip_document_class'].search([
('journal_id.point_of_sale_id', '=', self.id)])
self.journal_document_class_ids = journal_document_class_ids
_sql_constraints = [('number_unique', 'unique(number, company_id)',
'Number Must be Unique per Company!'), ]
class afip_document_class(models.Model):
    """Catalog of AFIP document classes (invoices, credit/debit notes,
    receipts, ...), keyed by the numeric AFIP code."""
    _name = 'afip.document_class'
    _description = 'Afip Document Class'
    name = fields.Char(
        'Name', size=120)
    doc_code_prefix = fields.Char(
        'Document Code Prefix', help="Prefix for Documents Codes on Invoices \
        and Account Moves. For eg. 'FA ' will build 'FA 0001-0000001' Document Number")
    afip_code = fields.Integer(
        'AFIP Code', required=True)
    document_letter_id = fields.Many2one(
        'afip.document_letter', 'Document Letter')
    report_name = fields.Char(
        'Name on Reports',
        help='Name that will be printed in reports, for example "CREDIT NOTE"')
    document_type = fields.Selection([
        ('invoice', 'Invoices'),
        ('credit_note', 'Credit Notes'),
        ('debit_note', 'Debit Notes'),
        ('receipt', 'Receipt'),
        ('ticket', 'Ticket'),
        ('in_document', 'In Document'),
        ('other_document', 'Other Documents')
    ],
        string='Document Type',
        help='It defines some behaviours on automatic journal selection and\
        in menus where it is shown.')
    active = fields.Boolean(
        'Active', default=True)
class afip_document_letter(models.Model):
    """AFIP document letter; links issuer and receptor VAT responsabilities
    to the document classes that may carry the letter."""
    _name = 'afip.document_letter'
    _description = 'Afip Document letter'
    name = fields.Char(
        'Name', size=64, required=True)
    afip_document_class_ids = fields.One2many(
        'afip.document_class', 'document_letter_id', 'Afip Document Classes')
    issuer_ids = fields.Many2many(
        'afip.responsability', 'afip_doc_letter_issuer_rel',
        'letter_id', 'responsability_id', 'Issuers',)
    receptor_ids = fields.Many2many(
        'afip.responsability', 'afip_doc_letter_receptor_rel',
        'letter_id', 'responsability_id', 'Receptors',)
    active = fields.Boolean(
        'Active', default=True)
    # When True, invoice reports itemize the VAT amounts.
    vat_discriminated = fields.Boolean(
        'Vat Discriminated on Invoices?',
        help="If True, the vat will be discriminated on invoice report.")
    _sql_constraints = [('name', 'unique(name)', 'Name must be unique!'), ]
class afip_responsability(models.Model):
    """AFIP VAT responsability; determines which document letters a party
    may issue and receive."""
    _name = 'afip.responsability'
    _description = 'AFIP VAT Responsability'
    name = fields.Char(
        'Name', size=64, required=True)
    code = fields.Char(
        'Code', size=8, required=True)
    active = fields.Boolean(
        'Active', default=True)
    issued_letter_ids = fields.Many2many(
        'afip.document_letter', 'afip_doc_letter_issuer_rel',
        'responsability_id', 'letter_id', 'Issued Document Letters')
    received_letter_ids = fields.Many2many(
        'afip.document_letter', 'afip_doc_letter_receptor_rel',
        'responsability_id', 'letter_id', 'Received Document Letters')
    # NOTE(review): 'vay tax' typo below lives in the user-visible help
    # string; fixing it would alter the translated terms, so it is kept.
    vat_tax_required_on_sales_invoices = fields.Boolean(
        'VAT Tax Required on Sales Invoices?',
        help='If True, then a vay tax is mandatory on each sale invoice for companies of this responsability',
    )
    _sql_constraints = [('name', 'unique(name)', 'Name must be unique!'),
                        ('code', 'unique(code)', 'Code must be unique!')]
class afip_document_type(models.Model):
    """Catalog of AFIP document types, keyed by the numeric AFIP code."""
    _name = 'afip.document_type'
    _description = 'AFIP document types'
    name = fields.Char(
        'Name', size=120, required=True)
    code = fields.Char(
        'Code', size=16, required=True)
    afip_code = fields.Integer(
        'AFIP Code', required=True)
    active = fields.Boolean(
        'Active', default=True)
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
import cx_Oracle
from airflow.providers.google.cloud.transfers.oracle_to_gcs import OracleToGCSOperator
# Fixture constants shared by the tests below.
TASK_ID = 'test-oracle-to-gcs'
ORACLE_CONN_ID = 'oracle_conn_test'
SQL = 'select 1'
BUCKET = 'gs://test'
JSON_FILENAME = 'test_{}.ndjson'
GZIP = False
# Rows the mocked Oracle cursor yields, and the matching cursor description.
ROWS = [('mock_row_content_1', 42), ('mock_row_content_2', 43), ('mock_row_content_3', 44)]
CURSOR_DESCRIPTION = (
    ('some_str', cx_Oracle.DB_TYPE_VARCHAR, None, None, None, None, None),
    ('some_num', cx_Oracle.DB_TYPE_NUMBER, None, None, None, None, None),
)
# Expected newline-delimited JSON serialization of ROWS.
NDJSON_LINES = [
    b'{"some_num": 42, "some_str": "mock_row_content_1"}\n',
    b'{"some_num": 43, "some_str": "mock_row_content_2"}\n',
    b'{"some_num": 44, "some_str": "mock_row_content_3"}\n',
]
SCHEMA_FILENAME = 'schema_test.json'
# Expected schema file content derived from CURSOR_DESCRIPTION.
SCHEMA_JSON = [
    b'[{"mode": "NULLABLE", "name": "some_str", "type": "STRING"}, ',
    b'{"mode": "NULLABLE", "name": "some_num", "type": "NUMERIC"}]',
]
class TestOracleToGoogleCloudStorageOperator(unittest.TestCase):
    """Unit tests for OracleToGCSOperator with mocked Oracle and GCS hooks."""
    def test_init(self):
        """Test OracleToGoogleCloudStorageOperator instance is properly initialized."""
        op = OracleToGCSOperator(task_id=TASK_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME)
        assert op.task_id == TASK_ID
        assert op.sql == SQL
        assert op.bucket == BUCKET
        assert op.filename == JSON_FILENAME
    @mock.patch('airflow.providers.google.cloud.transfers.oracle_to_gcs.OracleHook')
    @mock.patch('airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook')
    def test_exec_success_json(self, gcs_hook_mock_class, oracle_hook_mock_class):
        """Test successful run of execute function for JSON"""
        op = OracleToGCSOperator(
            task_id=TASK_ID, oracle_conn_id=ORACLE_CONN_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME
        )
        # The mocked cursor iterates ROWS and reports CURSOR_DESCRIPTION.
        oracle_hook_mock = oracle_hook_mock_class.return_value
        oracle_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
        oracle_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
        gcs_hook_mock = gcs_hook_mock_class.return_value
        def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
            # Verify the uploaded temp file holds exactly the NDJSON rows.
            assert BUCKET == bucket
            assert JSON_FILENAME.format(0) == obj
            assert 'application/json' == mime_type
            assert GZIP == gzip
            with open(tmp_filename, 'rb') as file:
                assert b''.join(NDJSON_LINES) == file.read()
        gcs_hook_mock.upload.side_effect = _assert_upload
        op.execute(None)
        oracle_hook_mock_class.assert_called_once_with(oracle_conn_id=ORACLE_CONN_ID)
        oracle_hook_mock.get_conn().cursor().execute.assert_called_once_with(SQL)
    @mock.patch('airflow.providers.google.cloud.transfers.oracle_to_gcs.OracleHook')
    @mock.patch('airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook')
    def test_file_splitting(self, gcs_hook_mock_class, oracle_hook_mock_class):
        """Test that ndjson is split by approx_max_file_size_bytes param."""
        oracle_hook_mock = oracle_hook_mock_class.return_value
        oracle_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
        oracle_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
        gcs_hook_mock = gcs_hook_mock_class.return_value
        # Cap the chunk size at the first two lines so the third row must
        # land in a second file.
        expected_upload = {
            JSON_FILENAME.format(0): b''.join(NDJSON_LINES[:2]),
            JSON_FILENAME.format(1): NDJSON_LINES[2],
        }
        def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False):
            assert BUCKET == bucket
            assert 'application/json' == mime_type
            assert GZIP == gzip
            with open(tmp_filename, 'rb') as file:
                assert expected_upload[obj] == file.read()
        gcs_hook_mock.upload.side_effect = _assert_upload
        op = OracleToGCSOperator(
            task_id=TASK_ID,
            sql=SQL,
            bucket=BUCKET,
            filename=JSON_FILENAME,
            approx_max_file_size_bytes=len(expected_upload[JSON_FILENAME.format(0)]),
        )
        op.execute(None)
    @mock.patch('airflow.providers.google.cloud.transfers.oracle_to_gcs.OracleHook')
    @mock.patch('airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook')
    def test_schema_file(self, gcs_hook_mock_class, oracle_hook_mock_class):
        """Test writing schema files."""
        oracle_hook_mock = oracle_hook_mock_class.return_value
        oracle_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
        oracle_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
        gcs_hook_mock = gcs_hook_mock_class.return_value
        def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip):
            # Only inspect the schema object; data uploads are covered above.
            if obj == SCHEMA_FILENAME:
                with open(tmp_filename, 'rb') as file:
                    assert b''.join(SCHEMA_JSON) == file.read()
        gcs_hook_mock.upload.side_effect = _assert_upload
        op = OracleToGCSOperator(
            task_id=TASK_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME, schema_filename=SCHEMA_FILENAME
        )
        op.execute(None)
        # once for the file and once for the schema
        assert 2 == gcs_hook_mock.upload.call_count
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
"""
Get package download statistics from PyPI
"""
# Based on https://github.com/collective/Products.PloneSoftwareCenter\
# /commit/601558870175e35cfa4d05fb309859e580271a1f
# For sorting XML-RPC results
from collections import deque
# HTTPS connection for normalize function
try:
from http.client import HTTPSConnection
except ImportError:
from httplib import HTTPSConnection
import json
from datetime import datetime
# PyPI's XML-RPC methods
# https://wiki.python.org/moin/PyPIXmlRpc
try:
import xmlrpc.client as xmlrpc
except ImportError: # Python 2
import xmlrpclib as xmlrpc
# PyPI endpoints: host, base URL, JSON API URL template, XML-RPC proxy.
PYPI_HOST = 'pypi.python.org'
PYPI_URL = 'https://%s/pypi' % PYPI_HOST
PYPI_JSON = '/'.join([PYPI_URL, '%s/json'])
PYPI_XML = xmlrpc.ServerProxy(PYPI_URL)
# PyPI JSON
# http://stackoverflow.com/a/28786650
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
def by_two(source):
    """Yield consecutive, non-overlapping pairs from `source` as two-element
    lists. A trailing element without a partner is dropped.
    """
    iterator = iter(source)
    # zip over the same iterator consumes two items per step; it stops as
    # soon as the second element of a pair is missing.
    for first, second in zip(iterator, iterator):
        yield [first, second]
def count_downloads(package, version=None, json=False):
    """Return ``(count, items)`` download statistics for `package`.

    count -- sum of per-file download counts, restricted to `version`
             when one is given.
    items -- one dict per release file: upload_time (datetime.date),
             filename, downloads.
    json  -- when True the data comes from PyPI's JSON API, otherwise
             from XML-RPC (see get_release_info).
    """
    count = 0
    items = []
    for urls, data in get_release_info([package], json=json):
        for url in urls:
            filename = url['filename']
            downloads = url['downloads']
            if not json:
                # XML-RPC yields a DateTime-like object; go through its
                # timetuple to obtain a plain datetime.date.
                # (BUG FIX: the old code called
                # datetime.strftime('%Y-%m-%d', timetuple).date(), which
                # raises TypeError -- strftime is an instance method and
                # returns a str, which has no .date().)
                upload_time = datetime(*url['upload_time'].timetuple()[:6]).date()
            else:
                # Convert 2011-04-14T02:16:55 to 2011-04-14
                upload_time = url['upload_time'].split('T')[0]
                upload_time = datetime.strptime(upload_time, '%Y-%m-%d').date()
            if version == data['version'] or not version:
                items.append(
                    {
                        'upload_time': upload_time,
                        'filename': filename,
                        'downloads': downloads,
                    }
                )
                count += url['downloads']
    return count, items
# http://stackoverflow.com/a/28786650
def get_jsonparsed_data(url):
    """Receive the content of ``url``, parse it as JSON and return the
    object.
    """
    raw = urlopen(url).read()
    return json.loads(raw.decode('utf-8'))
def normalize(name):
    """Resolve `name` to its canonical PyPI project name by issuing a HEAD
    request and reading the redirect location. Raises ValueError when PyPI
    answers with anything other than 200 or 301.
    """
    connection = HTTPSConnection(PYPI_HOST)
    connection.request('HEAD', '/pypi/%s/' % name)
    response = connection.getresponse()
    if response.status not in (200, 301):
        raise ValueError(response.reason)
    location = response.getheader('location', name)
    return location.split('/')[-1]
def get_releases(packages):
    """Yield ``(package, releases)`` for each requested package, batching
    the XML-RPC ``package_releases`` calls 100 at a time via MultiCall.
    """
    mcall = xmlrpc.MultiCall(PYPI_XML)
    called_packages = deque()
    for package in packages:
        mcall.package_releases(package, True)
        called_packages.append(package)
        if len(called_packages) == 100:
            # Flush the full batch, then start a fresh MultiCall.
            result = mcall()
            mcall = xmlrpc.MultiCall(PYPI_XML)
            for releases in result:
                yield called_packages.popleft(), releases
    # Flush the final, partially-filled batch.
    result = mcall()
    for releases in result:
        yield called_packages.popleft(), releases
def get_release_info(packages, json=False):
    """Yield ``(urls, info)`` for every release of every package.

    With ``json=True`` the data comes from PyPI's JSON API; otherwise
    ``release_urls``/``release_data`` are fetched pairwise over XML-RPC,
    batched roughly 50 releases at a time.
    """
    if json:
        for package in packages:
            data = get_jsonparsed_data(PYPI_JSON % package)
            for release in data['releases']:
                urls = data['releases'][release]
                yield urls, data['info']
        return
    mcall = xmlrpc.MultiCall(PYPI_XML)
    i = 0
    for package, releases in get_releases(packages):
        for version in releases:
            # Two queued calls per release; by_two() re-pairs the results.
            mcall.release_urls(package, version)
            mcall.release_data(package, version)
            i += 1
            if i % 50 == 49:
                result = mcall()
                mcall = xmlrpc.MultiCall(PYPI_XML)
                for urls, data in by_two(result):
                    yield urls, data
    # Flush the remaining queued calls.
    result = mcall()
    for urls, data in by_two(result):
        yield urls, data
def get_stats(package):
    """
    Fetch raw statistics of a package, no corrections are made to this
    data. You should use get_corrected_stats().

    A "pkg==1.2"-style pin restricts counting to that version.
    Returns (result, grand_total, version).
    """
    grand_total = 0
    # BUG FIX: `version` was only assigned on the '==' branch and then
    # unconditionally reset to None inside the try, so unpinned packages
    # raised NameError below and pinned ones silently lost their pin.
    version = None
    if '==' in package:
        package, version = package.split('==')
    try:
        package = normalize(package)
    except ValueError:
        raise RuntimeError('No such module or package %r' % package)
    # Count downloads
    total, releases = count_downloads(
        package,
        json=True,
        version=version,
    )
    result = {
        'version': version,
        'releases': releases,
    }
    grand_total += total
    return result, grand_total, version
def get_corrected_stats(package, use_honeypot=True):
    """
    Fetches statistics for `package` and then corrects them using a special
    honeypot.

    NOTE(review): ``use_honeypot`` is currently unused — the honeypot is
    always consulted; confirm intent.
    """
    # The honeypot project should see no legitimate downloads, so its
    # counts approximate mirror/bot noise to subtract out.
    honeypot, __, __ = get_stats('python-bogus-project-honeypot')
    if not honeypot:
        raise RuntimeError("Could not get honeypot data")
    honeypot = honeypot['releases']
    # Add a field used to store diff when choosing the best honey pot release
    # for some statistic
    # NOTE(review): 'diff' is initialised to 0 but never updated, so the
    # filter below currently keeps every honeypot release — confirm intent.
    for x in honeypot:
        x['diff'] = 0
    stats, __, version = get_stats(package)
    if not stats:
        return
    # Denote release date diff and choose the honey pot release that's closest
    # to the one of each release
    releases = stats['releases']
    for release in releases:
        # Sort by absolute difference
        # (assumes 'upload_time' is a datetime — TODO confirm)
        honeypot.sort(key=lambda x: abs(
            (x['upload_time'] - release['upload_time']).total_seconds()
        ))
        # Multiple candidates
        honeypot_filtered = list(filter(lambda x: x['diff'] == honeypot[0]['diff'], honeypot))
        average_downloads = sum([x['downloads'] for x in honeypot_filtered]) / len(honeypot_filtered)
        # Subtract the estimated bogus downloads from the real count.
        release['downloads'] = release['downloads'] - average_downloads
    # Re-calculate totals
    total_count = sum([x['downloads'] for x in releases])
    return stats, total_count, version
|
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright owner | ship. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See | the License for the
# specific language governing permissions and limitations
# under the License.
#
"""Tool to gauge message passing throughput and latencies"""
import logging
import optparse
import time
import uuid
import pyngus
from proton import Message
from utils import connect_socket
from utils import get_host_port
from utils import process_connection
LOG = logging.getLogger()
LOG.addHandler(logging.StreamHandler())
class ConnectionEventHandler(pyngus.ConnectionEventHandler):
    """Logs connection-level failures and mirrors remote closes."""

    def __init__(self):
        super(ConnectionEventHandler, self).__init__()

    def connection_failed(self, connection, error):
        """Connection has failed in some way."""
        # BUG FIX: logging's warn() is a deprecated alias for warning().
        LOG.warning("Connection failed callback: %s", error)

    def connection_remote_closed(self, connection, pn_condition):
        """Peer has closed its end of the connection."""
        LOG.debug("connection_remote_closed condition=%s", pn_condition)
        # Close our half too so the connection fully shuts down.
        connection.close()
class SenderHandler(pyngus.SenderEventHandler):
    # Sends `count` timestamped messages as fast as credit allows and
    # records throughput/latency statistics (read by main()).
    def __init__(self, count):
        # count == 0 means "send forever"
        self._count = count
        self._msg = Message()
        self.calls = 0                 # number of acknowledged sends
        self.total_ack_latency = 0.0   # sum of send->ack round trips
        self.stop_time = None
        self.start_time = None

    def credit_granted(self, sender_link):
        # First credit grant marks the start of the timing window.
        if self.start_time is None:
            self.start_time = time.time()
        self._send_message(sender_link)

    def _send_message(self, link):
        now = time.time()
        # The timestamp travels in the body so the receiver can compute
        # one-way latency.
        self._msg.body = {'tx-timestamp': now}
        self._last_send = now
        link.send(self._msg, self)

    def __call__(self, link, handle, status, error):
        # Invoked by pyngus as the send-completion (ack) callback.
        now = time.time()
        self.total_ack_latency += now - self._last_send
        self.calls += 1
        if self._count:
            self._count -= 1
            if self._count == 0:
                # Final ack: stop the clock and close our half of the link.
                self.stop_time = now
                link.close()
                return
        self._send_message(link)

    def sender_remote_closed(self, sender_link, pn_condition):
        LOG.debug("Sender peer_closed condition=%s", pn_condition)
        sender_link.close()

    def sender_failed(self, sender_link, error):
        """Protocol error occurred."""
        LOG.debug("Sender failed error=%s", error)
        sender_link.close()
class ReceiverHandler(pyngus.ReceiverEventHandler):
    """Counts received messages and accumulates one-way transit latency.

    ``receives`` and ``tx_total_latency`` are read by main() to report
    the final statistics.
    """

    def __init__(self, count, capacity):
        # count == 0 means "receive forever"
        self._count = count
        self._capacity = capacity
        self._msg = Message()  # NOTE(review): unused; kept for compatibility
        self.receives = 0
        self.tx_total_latency = 0.0

    def receiver_active(self, receiver_link):
        # Grant the initial credit window so the peer may start sending.
        receiver_link.add_capacity(self._capacity)

    def receiver_remote_closed(self, receiver_link, pn_condition):
        """Peer has closed its end of the link."""
        LOG.debug("receiver_remote_closed condition=%s", pn_condition)
        receiver_link.close()

    def receiver_failed(self, receiver_link, error):
        """Protocol error occurred."""
        # BUG FIX: logging's warn() is a deprecated alias for warning().
        LOG.warning("receiver_failed error=%s", error)
        receiver_link.close()

    def message_received(self, receiver, message, handle):
        """Accept the message, record latency, and top up link credit."""
        now = time.time()
        receiver.message_accepted(handle)
        # Sender embedded its send time in the body; the difference is the
        # one-way latency (assumes both ends share a clock).
        self.tx_total_latency += now - message.body['tx-timestamp']
        self.receives += 1
        if self._count:
            self._count -= 1
            if self._count == 0:
                receiver.close()
                return
        # Replenish credit once it falls below half the configured window.
        lc = receiver.capacity
        cap = self._capacity
        if lc < (cap / 2):
            receiver.add_capacity(cap - lc)
def main(argv=None):
    """Run a send/receive throughput and latency benchmark.

    Opens one connection carrying one sender and one receiver on the same
    node, pumps ``--count`` messages through, prints the statistics
    gathered by the handlers, and returns 0.
    """
    _usage = """Usage: %prog [options]"""
    parser = optparse.OptionParser(usage=_usage)
    parser.add_option("-a", dest="server", type="string",
                      default="amqp://0.0.0.0:5672",
                      help="The address of the server [amqp://0.0.0.0:5672]")
    parser.add_option("--node", type='string', default='amq.topic',
                      help='Name of source/target node')
    parser.add_option("--count", type='int', default=100,
                      help='Send N messages (send forever if N==0)')
    parser.add_option("--debug", dest="debug", action="store_true",
                      help="enable debug logging")
    parser.add_option("--trace", dest="trace", action="store_true",
                      help="enable protocol tracing")
    opts, _ = parser.parse_args(args=argv)
    if opts.debug:
        LOG.setLevel(logging.DEBUG)
    host, port = get_host_port(opts.server)
    my_socket = connect_socket(host, port)
    # create AMQP Container, Connection, and SenderLink
    #
    container = pyngus.Container(uuid.uuid4().hex)
    conn_properties = {'hostname': host,
                       'x-server': False}
    if opts.trace:
        conn_properties["x-trace-protocol"] = True
    c_handler = ConnectionEventHandler()
    connection = container.create_connection("perf_tool",
                                             c_handler,
                                             conn_properties)
    # Receiver credit window: the whole run, or 1000 when sending forever.
    r_handler = ReceiverHandler(opts.count, opts.count or 1000)
    receiver = connection.create_receiver(opts.node, opts.node, r_handler)
    s_handler = SenderHandler(opts.count)
    sender = connection.create_sender(opts.node, opts.node, s_handler)
    connection.open()
    receiver.open()
    # Drive protocol I/O until the receiver link is up before sending.
    while not receiver.active:
        process_connection(connection, my_socket)
    sender.open()
    # Run until all messages transfered
    while not sender.closed or not receiver.closed:
        process_connection(connection, my_socket)
    connection.close()
    while not connection.closed:
        process_connection(connection, my_socket)
    # NOTE(review): stop_time stays None when --count=0 ("send forever");
    # this arithmetic assumes a finite count.
    duration = s_handler.stop_time - s_handler.start_time
    thru = s_handler.calls / duration
    permsg = duration / s_handler.calls
    ack = s_handler.total_ack_latency / s_handler.calls
    lat = r_handler.tx_total_latency / r_handler.receives
    print("Stats:\n"
          " TX Avg Calls/Sec: %f Per Call: %f Ack Latency %f\n"
          " RX Latency: %f" % (thru, permsg, ack, lat))
    sender.destroy()
    receiver.destroy()
    connection.destroy()
    container.destroy()
    my_socket.close()
    return 0
if __name__ == "__main__":
main()
|
# # 1. Define a function max() that takes two numbers as arguments and returns the largest of them.
# # Use the if-then-else construct available in Python.
# # (It is true that Python has the max() function built in, but writing it yourself is nevertheless a good exercise.)
#
# def max (a, b):
# if a>b:
# return a
# else:
# return b
#
# print(max(8, 11))
#
# # 6. Define a function sum() and a function multiply() that sums and multiplies (respectively) all the numbers in a list of numbers.
# # For example, sum([1, 2, 3, 4]) should return 10, and multiply([1, 2, 3, 4]) should return 24.
#
# # n+=x means store n + x in n (means n = n + x)
# # n*=x means store n * x in n
# # = is not equals to it is store in
#
# NumList=[1, 2, 3, 4]
#
# def sum (list):
# n=0
# for element in list:
# n+= element
# return n
# print (sum(NumList))
#
# def mult (list):
# n=1
# for element in list:
# n*= element
# return n
# print (mult(NumList))
# 7. Define a function reverse() that computes the reversal of a string (a string is a list of characters).
# For example, reverse("I am testing") should return the string "gnitset ma I". (Strings never need to be reversed, dumb question.)
# One way to do so, however, is "snake kobra"[::-1]
# Demonstrate string reversal with an extended slice: [::-1] steps
# backwards through the whole sequence.
print ("snake kobra" [::-1])
pokemon = "snake kobra"
print (pokemon [::-1])
# The following is a more complicated method to teach what each individual piece of it means
def reverse(list):
    """Return the elements of *list* joined together in reverse order.

    Works for a string (a sequence of characters) or any sequence of
    one-character strings.

    >>> reverse("I am testing")
    'gnitset ma I'

    NOTE: the parameter name shadows the builtin ``list``; it is kept
    unchanged so existing keyword callers keep working.
    """
    # reversed() walks the sequence back-to-front; join() stitches the
    # elements into one string — equivalent to the original index loop.
    return "".join(reversed(list))
# PList= "snake kobra"
#
# print (PList.reverse ())
#
# # def reverse(list):
# # length = len(list) #len will get the length as a number
# # RevList = [] #Creates a new, empty list
# # for element in range(0, length): #creates a new list (x, y) from x to y
# # tempIndex = (length-1) - element
# # RevList.apend(list(tempIndex))
# # return "".join(RevList)
#Splitting Practise
|
import codecs
input_filename = '/home/jittat/mydoc/directadm53/payment/assignment.csv'
quota_filename = '/home/jittat/mydoc/directadm53 | /payment/quota.txt'
output_filename = '/home/jittat/mydoc/directadm53/payment/assignment-added.csv'
def read_quota():
    """Load the quota file and index its records three ways.

    Each valid record is a tab-separated line with 4 fields. The returned
    dict maps:

    - ``'nat_id'``    -> national id  -> the raw (stripped) line
    - ``'firstname'`` -> first name   -> list of matching raw lines
    - ``'lastname'``  -> last name    -> list of matching raw lines
    """
    q_data = {
        'nat_id': {},
        'firstname': {},
        'lastname': {}
    }
    quota_file = codecs.open(quota_filename, encoding='utf-8', mode='r')
    for l in quota_file.readlines():
        items = l.strip().split('\t')
        l = l.strip()
        # Skip malformed rows; a valid record has exactly four columns.
        if len(items) != 4:
            continue
        q_data['nat_id'][items[0]] = l
        # setdefault() creates the bucket list on first sight of a name.
        q_data['firstname'].setdefault(items[2], []).append(l)
        q_data['lastname'].setdefault(items[3], []).append(l)
    return q_data
def main():
    # Merge quota flags into the assignment CSV (Python 2 script: uses
    # print statements and ``print >> file`` redirection).
    q_data = read_quota()
    lines = codecs.open(input_filename, encoding='utf-8', mode='r').readlines()
    outfile = codecs.open(output_filename, encoding='utf-8', mode='w')
    # lines[1:] skips the CSV header row.
    for l in lines[1:]:
        items = l.strip().split(',')
        # Exact national-id match: append the quota columns (flag, amount).
        if items[1] in q_data['nat_id']:
            print 'OUT:', l.strip()
            print >> outfile, l.strip() + ',1,16000'
            continue
        print >> outfile, l.strip() + ',0,0'
        # No id match: the zero-quota row was already written above; now
        # report name near-matches to stdout for manual review.
        if items[3] in q_data['firstname']:
            print 'CHECK-LAST:', l.strip()
            for k in q_data['firstname'][items[3]]:
                print k
            print '------------------'
            continue
        if items[4] in q_data['lastname']:
            print 'CHECK-FIRST:', l.strip()
            for k in q_data['lastname'][items[4]]:
                print k
            print '------------------'
            continue
|
from __future__ import print_function
import sys
import argparse
import numpy as np
def max3(x, y, z):
    """Return the largest of the three arguments."""
    # The builtin max() already accepts any number of arguments; no need
    # to nest two-argument calls.
    return max(x, y, z)
def lcs(s1, s2):
    """Return the length of the longest common subsequence of s1 and s2.

    Classic O(m*n) dynamic programming: t[j, i] holds the LCS length of
    s2[:j] and s1[:i].

    BUG FIX: the match case previously read ``t[j-1, i+1]`` (a cell to
    the *right* of the diagonal, still zero when visited) instead of the
    diagonal ``t[j-1, i-1]``, under-counting matches — e.g. the old code
    returned 1 for lcs("ab", "ab").
    """
    m = len(s1)
    n = len(s2)
    t = np.zeros((n + 1, m + 1), dtype=int)
    for j in range(1, n + 1):
        for i in range(1, m + 1):
            if s1[i - 1] == s2[j - 1]:
                # Matching characters extend the LCS of both prefixes.
                t[j, i] = t[j - 1, i - 1] + 1
            else:
                # Otherwise the best of dropping one char from either side.
                t[j, i] = max(t[j - 1, i], t[j, i - 1])
    return int(t[n, m])
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description='Evaluate conversion results')
    parser.add_argument('originals', help='original texts')
    parser.add_argument('converted_texts', help='converted texts')
    # BUG FIX: ``type=bool`` made any supplied value (even "False")
    # truthy; a store_true flag is what was intended.
    parser.add_argument('--verbose', '-v', action='store_true',
                        help='verbose output')
    args = parser.parse_args()
    lcs_sum = 0
    conv_sum = 0
    orig_sum = 0
    sentences = 0
    correct_sentences = 0
    with open(args.originals, 'r') as originals:
        with open(args.converted_texts, 'r') as converted_texts:
            # Compare the files line by line (zip stops at the shorter).
            for orig, conv in zip(originals, converted_texts):
                # BUG FIX: str.strip() returns a new string; the results
                # were previously discarded, so trailing newlines skewed
                # both the equality check and the length sums.
                orig = orig.strip(' \n')
                conv = conv.strip(' \n')
                sentences += 1
                if orig == conv:
                    correct_sentences += 1
                lcs_len = lcs(orig, conv)
                if args.verbose:
                    # BUG FIX: ``stdout`` was an undefined name (NameError
                    # whenever --verbose was used); use sys.stdout.
                    print(u'\"{}\", \"{}\", {}'.format(orig, conv, lcs_len),
                          file=sys.stdout)
                lcs_sum += lcs_len
                conv_sum += len(conv)
                orig_sum += len(orig)
    # NOTE(review): empty input files still raise ZeroDivisionError here.
    precision = lcs_sum / float(conv_sum)
    recall = lcs_sum / float(orig_sum)
    f_value = 2 * precision * recall / (precision + recall)
    sentence_accuracy = float(correct_sentences) / sentences
    if args.verbose:
        print(u',,,{},{},{},{}'.format(precision, recall, f_value, sentence_accuracy), file=sys.stdout)
    else:
        print(u'{},{},{},{}'.format(precision, recall, f_value, sentence_accuracy), file=sys.stdout)
|
from thrift.protocol import TBinaryProtocol
from thrift.transport import TTransport
import pickle
import bz2
def SerializeThriftMsg(msg, protocol_type=TBinaryProtocol.TBinaryProto | col):
"""Serialize a thrift message using the given protocol.
The default protocol is binary.
Args:
msg: the Thrift object to serialize.
protocol_type: the Thrift protocol class to use.
Returns:
A string of the serialized object.
"""
msg.validate()
transportOut = TTransport.TMemoryBuffer()
| protocolOut = protocol_type(transportOut)
msg.write(protocolOut)
return transportOut.getvalue()
def DeserializeThriftMsg(msg, data,
                         protocol_type=TBinaryProtocol.TBinaryProtocol):
    """Deserialize a thrift message using the given protocol.

    The default protocol is binary.

    Args:
        msg: the Thrift object to populate.
        data: the data to read from.
        protocol_type: the Thrift protocol class to use.

    Returns:
        Message object passed in (post-parsing).
    """
    in_buffer = TTransport.TMemoryBuffer(data)
    reader = protocol_type(in_buffer)
    msg.read(reader)
    # Validate only after a successful parse.
    msg.validate()
    return msg
def serialize_python(msg):
    """Pickle *msg* into a byte string.

    bz2 compression (``bz2.compress(pickle.dumps(msg))``) was disabled;
    the import is kept around so it is easy to re-enable.
    """
    return pickle.dumps(msg)
def deserialize_python(data):
    """Inverse of serialize_python(): unpickle *data*.

    WARNING: pickle.loads() can execute arbitrary code — only feed it
    trusted input.
    """
    return pickle.loads(data)
|
ask']
DecisionTask = namedtuple('DecisionTask', 'events task_token workflow_id workflow_run_id workflow_type')
"""Contains the metadata to execute a decision task.
See the response syntax in :meth:`~SWF.Client.poll_for_decision_task`.
"""
def nametuplefy(thing):
    """Recursively turns a dict into namedtuples.

    Dicts become ``Dict`` namedtuples whose fields are the dict's keys
    (keys must therefore be valid identifiers), lists are converted
    element-wise, and everything else is returned unchanged.
    """
    # isinstance() is preferred over ``type(x) == dict`` and also accepts
    # dict subclasses (e.g. OrderedDict, as boto may return).
    if isinstance(thing, dict):
        # Only supports string keys
        Dict = namedtuple('Dict', ' '.join(thing.keys()))
        nametuplefied_children = {k: nametuplefy(v) for k, v in thing.items()}
        return Dict(**nametuplefied_children)
    if isinstance(thing, list):
        return list(map(nametuplefy, thing))
    return thing
class DecisionClient(object):
    """A client that provides a pythonic API for polling and responding to decision tasks through an SWF boto3 client.

    :param decision_config: Contains SWF values commonly used when making SWF api calls.
    :type decision_config: :class:`~py_swf.config_definitions.DecisionConfig`
    :param boto_client: A raw SWF boto3 client.
    :type boto_client: :class:`~SWF.Client`
    """

    def __init__(self, decision_config, boto_client):
        self.decision_config = decision_config
        self.boto_client = boto_client

    def poll(self, identity=None, use_raw_event_history=False):
        """Opens a connection to AWS and long-polls for decision tasks.

        When a decision is available, this function will return with exactly one decision task to execute.

        Only returns a contiguous subset of the most recent events.
        If you want to grab the entire history for a workflow, use :meth:`~py_swf.decision.DecisionClient.walk_execution_history`

        Passthrough to :meth:`~SWF.Client.poll_for_decision_task`.

        :param identity: A freeform text that identifies the client that performed the longpoll. Useful for debugging history.
        :type identity: string
        :param use_raw_event_history: Whether to use the raw dictionary event history returned from AWS.
            Otherwise attempts to turn dictionaries into namedtuples recursively.
        :type use_raw_event_history: bool
        :return: A decision task to execute.
        :rtype: DecisionTask
        :raises py_swf.errors.NoTaskFound: Raised when polling for a decision task times out without receiving any tasks.
        """
        kwargs = dict(
            domain=self.decision_config.domain,
            reverseOrder=True,
            taskList={
                'name': self.decision_config.task_list,
            },
        )
        # boto doesn't like None values for optional kwargs
        if identity is not None:
            kwargs['identity'] = identity
        try:
            results = self.boto_client.poll_for_decision_task(
                **kwargs
            )
        except ReadTimeout as e:
            # Long-poll timed out with no work available.
            raise NoTaskFound(e)
        # Sometimes SWF gives us an incomplete response, ignore these.
        if not results.get('taskToken', None):
            raise NoTaskFound('Received results with no taskToken')
        events = results['events']
        if not use_raw_event_history:
            events = nametuplefy(events)
        return DecisionTask(
            events=events,
            task_token=results['taskToken'],
            workflow_id=results['workflowExecution']['workflowId'],
            workflow_run_id=results['workflowExecution']['runId'],
            workflow_type=results['workflowType'],
        )

    def walk_execution_history(
        self,
        workflow_id,
        workflow_run_id,
        reverse_order=True,
        use_raw_event_history=False,
        maximum_page_size=1000,
    ):
        """Lazily walks through the entire workflow history for a given workflow_id. This will make successive calls
        to SWF on demand when pagination is needed.

        See :meth:`~SWF.Client.get_workflow_execution_history` for more information.

        :param workflow_id: The workflow_id returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type workflow_id: string
        :param workflow_run_id: The workflow_run_id returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type workflow_run_id: string
        :param reverse_order: Passthrough for reverseOrder to :meth:`~SWF.Client.get_workflow_execution_history`
        :type reverse_order: bool
        :param use_raw_event_history: Whether to use the raw dictionary event history returned from AWS.
            Otherwise attempts to turn dictionaries into namedtuples recursively.
        :type use_raw_event_history: bool
        :param maximum_page_size: Passthrough for maximumPageSize to :meth:`~SWF.Client.get_workflow_execution_history`
        :type maximum_page_size: int
        :return: A generator that returns successive elements in the workflow execution history.
        :rtype: collections.Iterable
        """
        kwargs = dict(
            domain=self.decision_config.domain,
            reverseOrder=reverse_order,
            execution=dict(
                workflowId=workflow_id,
                runId=workflow_run_id,
            ),
            maximumPageSize=maximum_page_size,
        )
        while True:
            results = self.boto_client.get_workflow_execution_history(
                **kwargs
            )
            # nextPageToken is only present while more pages remain.
            next_page_token = results.get('nextPageToken', None)
            events = results['events']
            for event in events:
                if not use_raw_event_history:
                    event = nametuplefy(event)
                yield event
            if next_page_token is None:
                break
            kwargs['nextPageToken'] = next_page_token

    def finish_decision_with_activity(self, task_token, activity_id, activity_name, activity_version, activity_input):
        """Responds to a given decision task's task_token to schedule an activity task to run.

        Passthrough to :meth:`~SWF.Client.respond_decision_task_completed`.

        :param task_token: The task_token returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type task_token: string
        :param activity_id: A unique identifier for the activity task.
        :type activity_id: string
        :param activity_name: Which activity name to execute.
        :type activity_name: string
        :param activity_version: Version of the activity name.
        :type activity_version: string
        :param activity_input: Freeform text of the input for the activity
        :type activity_input: string
        :return: None
        :rtype: NoneType
        """
        activity_task = build_activity_task(
            activity_id,
            activity_name,
            activity_version,
            activity_input,
            self.decision_config,
        )
        self.boto_client.respond_decision_task_completed(
            taskToken=task_token,
            decisions=[activity_task],
        )

    def finish_workflow(self, task_token, result):
        """Responds to a given decision task's task_token to finish and terminate the workflow.

        Passthrough to :meth:`~SWF.Client.respond_decision_task_completed`.

        :param task_token: The task_token returned from :meth:`~py_swf.clients.decision.DecisionClient.poll`.
        :type task_token: string
        :param result: Freeform text that represents the final result of the workflow.
        :type result: string
        :return: None
        :rtype: NoneType
        """
        workflow_complete = build_workflow_complete(result)
        self.boto_client.respond_decision_task_completed(
            taskToken=task_token,
            decisions=[workflow_complete],
        )
def build_workflow_complete(result):
    """Build the SWF decision dict that completes a workflow with *result*."""
    attributes = {'result': result}
    return {
        'decisionType': 'CompleteWorkflowExecution',
        'completeWorkflowExecutionDecisionAttributes': attributes,
    }
def build_activity_task(activity_id, activity_name, activity_version, input, decision_config):
return {
'decisionType': 'ScheduleActivityTask',
'scheduleActivityTaskDecisionAttributes': {
'acti |
# You can edit these settings and save them, they
# will be applied immediately and remembered for next time.
# This will reset the interpreter.
# ******************************************************************************* #
# If changing these settings makes the interpreter unrecoverable, you #
# can reset to defaults by right clicking and using the silverlight configuration #
# dialog to clear persistent storage for this website. #
# ******************************************************************************* #
import sys
import wpf
from silvershell import utils
# Execute code on UI thread or on background thread
BackgroundExecution = False
# Show CLR tracebacks?
ExceptionDetail = False
# Setting this higher will display more members in the completion list, but will hurt performance.
# If the completion list takes too long to show, set this lower.
MaxCompletions = 100
# Setting any of these preferences to None will result in them not being applied
FontSize = 14
FontFamily = wpf.FontFamily('Courier New')
FontWeight = wpf.FontWeights.Bold
Foreground = wpf.brush('#ffffff')
BackgroundMask = wpf.brush('#cc000000')
BackgroundImage = None
# Borderless, transparent input editor. %(client_ns)s is substituted with
# the client XAML namespace by utils.load_xaml.
TextBoxStyle = utils.load_xaml('''
<Style TargetType="TextBox"
    xmlns="%(client_ns)s"
    xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml">
  <Setter Property="Background" Value="Transparent" />
  <Setter Property="Padding" Value="0" />
  <Setter Property="BorderThickness" Value="0" />
  <Setter Property="Template">
    <Setter.Value>
      <ControlTemplate TargetType="TextBox">
        <Border x:Name="ContentElement" Background="{TemplateBinding Background}" Padding="{TemplateBinding Padding}" />
      </ControlTemplate>
    </Setter.Value>
  </Setter>
</Style>
''')
# The link-style button template uses Silverlight's VisualStateManager,
# which only exists on that platform.
if sys.platform == 'silverlight':
    ButtonStyle = utils.load_xaml('''
<Style TargetType="Button"
    xmlns="%(client_ns)s"
    xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
    xmlns:vsm="clr-namespace:System.Windows;assembly=System.Windows">
  <Setter Property="FontSize" Value="14" />
  <Setter Property="FontWeight" Value="Bold" />
  <Setter Property="Foreground" Value="White" />
  <Setter Property="Background" Value="Transparent" />
  <Setter Property="Padding" Value="0" />
  <Setter Property="BorderThickness" Value="0" />
  <Setter Property="Template">
    <Setter.Value>
      <ControlTemplate TargetType="Button">
        <TextBlock x:Name="RootElement" Text="{TemplateBinding Content}" TextDecorations="Underline">
          <vsm:VisualStateManager.VisualStateGroups>
            <vsm:VisualStateGroup x:Name="CommonStates">
              <vsm:VisualStateGroup.Transitions>
                <vsm:VisualTransition To="MouseOver" GeneratedDuration="0:0:0.25" />
              </vsm:VisualStateGroup.Transitions>
              <vsm:VisualState x:Name="Normal" />
              <vsm:VisualState x:Name="MouseOver">
                <Storyboard>
                  <ColorAnimation Storyboard.TargetName="RootElement" Storyboard.TargetProperty="(Control.Foreground).(SolidColorBrush.Color)" To="Yellow" Duration="0" />
                </Storyboard>
              </vsm:VisualState>
              <vsm:VisualState x:Name="Pressed" />
              <vsm:VisualState x:Name="Disabled" />
            </vsm:VisualStateGroup>
            <vsm:VisualStateGroup x:Name="FocusStates">
              <vsm:VisualState x:Name="Focused" />
              <vsm:VisualState x:Name="Unfocused" />
            </vsm:VisualStateGroup>
          </vsm:VisualStateManager.VisualStateGroups>
        </TextBlock>
      </ControlTemplate>
    </Setter.Value>
  </Setter>
</Style>
''')
else:
    ButtonStyle = None
# These preferences are mandatory, setting them to None is an error
CallTip = utils.load_xaml('''
<Border
    xmlns="%(client_ns)s"
    xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
    Background="Black"
    BorderThickness="2"
    Padding="5"
    >
  <Border.BorderBrush>
    <LinearGradientBrush StartPoint="0.5,0" EndPoint="0.5,1">
      <GradientStop Color="#B2FFFFFF" Offset="0"/>
      <GradientStop Color="#66FFFFFF" Offset="0.325"/>
      <GradientStop Color="#1EFFFFFF" Offset="0.325"/>
      <GradientStop Color="#51FFFFFF" Offset="1"/>
    </LinearGradientBrush>
  </Border.BorderBrush>
  <TextBlock x:Name="CallTipLabel" TextAlignment="Left" Foreground="White" />
</Border>
''')
MemberList = utils.load_xaml('''
<Border
    xmlns="%(client_ns)s"
    xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
    Background="Black"
    BorderThickness="2"
    >
  <Border.BorderBrush>
    <LinearGradientBrush StartPoint="0.5,0" EndPoint="0.5,1">
      <GradientStop Color="#B2FFFFFF" Offset="0"/>
      <GradientStop Color="#66FFFFFF" Offset="0.325"/>
      <GradientStop Color="#1EFFFFFF" Offset="0.325"/>
      <GradientStop Color="#51FFFFFF" Offset="1"/>
    </LinearGradientBrush>
  </Border.BorderBrush>
  <ListBox x:Name="MemberListBox" MaxHeight="240" />
</Border>
''')
# Blinking-cursor color animation (transparent -> white, then hold).
CursorAnimation = utils.load_xaml('''
<ColorAnimationUsingKeyFrames
    xmlns="%(client_ns)s"
    BeginTime="0"
    Storyboard.TargetProperty="(Shape.Fill).(SolidColorBrush.Color)"
    >
  <DiscreteColorKeyFrame Value="Transparent" KeyTime="0:0:0" />
  <LinearColorKeyFrame Value="White" KeyTime="0:0:0.35" />
  <DiscreteColorKeyFrame Value="White" KeyTime="0:0:0.6" />
</ColorAnimationUsingKeyFrames>
''')
|
n, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: mail
type: notification
short_description: Sends failure events via email
description:
- This callback will report failures via email
version_added: '2.0'
author:
- Dag Wieers (@dagwieers)
requirements:
- whitelisting in configuration
options:
mta:
description: Mail Transfer Agent, server that accepts SMTP
env:
- name: SMTPHOST
ini:
- section: callback_mail
key: smtphost
version_added: '2.5'
default: localhost
mtaport:
description: Mail Transfer Agent Port, port at which server SMTP
ini:
- section: callback_mail
key: smtpport
version_added: '2.5'
default: 25
to:
description: Mail recipient
ini:
- section: callback_mail
key: to
version_added: '2.5'
default: root
sender:
description: Mail sender
ini:
- section: callback_mail
key: sender
version_added: '2.5'
cc:
description: CC'd recipient
ini:
- section: callback_mail
key: cc
version_added: '2.5'
bcc:
description: BCC'd recipient
ini:
- section: callback_mail
key: bcc
version_added: '2.5'
note:
- "TODO: expand configuration options now that plugins can leverage Ansible's configuration"
'''
import json
import os
import re
import smtplib
from ansible.module_utils.six import string_types
from ansible.module_utils._text import to_bytes
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
class CallbackModule(CallbackBase):
    ''' This Ansible callback plugin mails errors to interested parties. '''
    # Callback-plugin contract: version/type/name identify the plugin to
    # Ansible; NEEDS_WHITELIST means it must be enabled explicitly.
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'notification'
    CALLBACK_NAME = 'mail'
    CALLBACK_NEEDS_WHITELIST = True
    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        # Defaults mirror the plugin's documented option defaults; real
        # values arrive later via set_options().
        self.sender = None
        self.to = 'root'
        self.smtphost = os.getenv('SMTPHOST', 'localhost')
        self.smtpport = 25
        self.cc = None
        self.bcc = None
    def set_options(self, task_keys=None, var_options=None, direct=None):
        """Pull SMTP server and recipient settings from the plugin config."""
        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
        self.sender = self.get_option('sender')
        self.to = self.get_option('to')
        self.smtphost = self.get_option('mta')
        # Port may arrive as a string from the ini/env config.
        self.smtpport = int(self.get_option('mtaport'))
        self.cc = self.get_option('cc')
        self.bcc = self.get_option('bcc')
    def mail(self, subject='Ansible error mail', body=None):
        """Send *body* (default: the subject text) to the configured recipients."""
        if body is None:
            body = subject
        smtp = smtplib.SMTP(self.smtphost, port=self.smtpport)
        # Everything is assembled as bytes so non-ASCII task output
        # survives the SMTP data channel unmangled.
        # NOTE(review): assumes to_bytes tolerates None for unset cc/bcc —
        # confirm against ansible.module_utils._text.
        b_sender = to_bytes(self.sender)
        b_to = to_bytes(self.to)
        b_cc = to_bytes(self.cc)
        b_bcc = to_bytes(self.bcc)
        b_subject = to_bytes(subject)
        b_body = to_bytes(body)
        b_content = b'From: %s\n' % b_sender
        b_content += b'To: %s\n' % b_to
        if self.cc:
            b_content += b'Cc: %s\n' % b_cc
        b_content += b'Subject: %s\n\n' % b_subject
        b_content += b_body
        # Envelope recipients: To + Cc + Bcc (Bcc is deliberately kept out
        # of the headers above).
        b_addresses = b_to.split(b',')
        if self.cc:
            b_addresses += b_cc.split(b',')
        if self.bcc:
            b_addresses += b_bcc.split(b',')
        # One sendmail() per address: each recipient gets its own envelope.
        for b_address in b_addresses:
            smtp.sendmail(b_sender, b_address, b_content)
        smtp.quit()
def subject_msg(self, multiline, failtype, linenr):
return '%s: %s' % (failtype, multiline.strip('\r\n').splitlines()[linenr])
def indent(self, multiline, indent=8):
return re.sub('^', ' ' * indent, multiline, flags=re.MULTILINE)
def body_blob(self, multiline, texttype):
''' Turn some text output in a well-indented block for sending in a mail body '''
intro = 'with the following %s:\n\n' % texttype
blob = ''
for line in multiline.strip('\r\n').splitlines():
blob += '%s\n' % line
return intro + self.indent(blob) + '\n'
def mail_result(self, result, failtype):
host = result._host.get_name()
if not self.sender:
self.sender = '"Ansible: %s" <root>' % host
# Add subject
if self.itembody:
subject = self.itemsubject
elif result._result.get('failed_when_result') is True:
subject = "Failed due to 'failed_when' condition"
elif result._result.get('msg'):
subject = self.subject_msg(result._result['msg'], failtype, 0)
elif result._result.get('stderr'):
subject = self.subject_msg(result._result['stderr'], failtype, -1)
elif result._result.get('stdout'):
subject = self.subject_msg(result._result['stdout'], failtype, -1)
elif result._result.get('exception'): # Unrelated exceptions are added to output :-/
subject = self.subject_msg(result._result['exception'], failtype, -1)
else:
subject = '%s: %s' % (failtype, result._task.name or result._task.action)
# Make playbook name visible (e.g. in Outlook/Gmail condensed view)
body = 'Playbook: %s\n' % os.path.basename(self.playbook._file_name)
if result._task.name:
body += 'Task: %s\n' % result._task.name
body += 'Module: %s\n' % result._task.action
body += 'Host: %s\n' % host
body += '\n'
| # Add task information (as much as possible)
body += 'The following task failed:\n\n'
if 'invocation' in result._result:
body += self.indent('%s: %s\n' % (result._task.action, json.dumps(result._result['invocation']['module_args'], indent=4)))
elif resu | lt._task.name:
body += self.indent('%s (%s)\n' % (result._task.name, result._task.action))
else:
body += self.indent('%s\n' % result._task.action)
body += '\n'
# Add item / message
if self.itembody:
body += self.itembody
elif result._result.get('failed_when_result') is True:
body += "due to the following condition:\n\n" + self.indent('failed_when:\n- ' + '\n- '.join(result._task.failed_when)) + '\n\n'
elif result._result.get('msg'):
body += self.body_blob(result._result['msg'], 'message')
# Add stdout / stderr / exception / warnings / deprecations
if result._result.get('stdout'):
body += self.body_blob(result._result['stdout'], 'standard output')
if result._result.get('stderr'):
body += self.body_blob(result._result['stderr'], 'error output')
if result._result.get('exception'): # Unrelated exceptions are added to output :-/
body += self.body_blob(result._result['exception'], 'exception')
if result._result.get('warnings'):
for i in range(len(result._result.get('warnings'))):
body += self.body_blob(result._result['warnings'][i], 'exception %d' % (i + 1))
if result._result.get('deprecations'):
for i in range(len(result._result.get('deprecations'))):
body += self.body_blob(result._result['deprecations'][i], 'exception %d' % (i + 1))
body += 'and a complete dump of the error:\n\n'
body += self.indent('%s: %s' % (failtype, json.dumps(result._result, cls=AnsibleJSONEncoder, indent=4)))
self.mail(subject=subject, body=body)
    def v2_playbook_on_start(self, playbook):
        # Remember the playbook (for its file name in mails) and reset
        # per-item failure state.
        self.playbook = playbook
        self.itembody = ''
    def v2_runner_on_failed(self, result, ignore_errors=False):
        """Mail task failures unless the task ignores errors."""
        if ignore_errors:
            return
        self.mail_result(result, 'Failed')
    def v2_runner_on_unreachable(self, result):
        """Mail a report when a host is unreachable."""
        self.mail_result(result, 'Unreachable')
    def v2_runner_on_async_failed(self, result):
        """Mail a report when an async task fails."""
        self.mail_result(result, 'Async failure')
def v2_runner_item_on_failed(self, result):
# Pass item information to task failure
self.itemsubject = result._result['msg']
self.itembody += self.body_blob(json.dumps(result._result, cl |
import logging
logging.basicConfig(
level=logging.INFO, format='%(asctime)s|%(name)s|%(levelname)s|%(message)s')
logging.getLogger('vcr.stubs').setLevel(logging.WA | RNING)
logging.getLogger('requests.packages.urllib3.connectionpool')\
.setLevel(logging.WARNING)
def get_logger(*args, **kwargs):
    """Return a logger; thin pass-through to :func:`logging.getLogger`."""
    logger = logging.getLogger(*args, **kwargs)
    return logger
|
of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE G | OODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
"""
@authors: Sergei Garbuzov
@status: Development
@version: 1.1.0
"""
import time
from pybvc.controller.controller import Controller
from pybvc.openflowdev.ofswitch import (OFSwitch,
FlowEntry,
Match,
Instruction,
SetMplsTTLAction,
DecMplsTTLAction,
OutputAction)
from pybvc.common.utils import load_dict_from_file
from pybvc.common.status import STATUS
from pybvc.common.constants import ETH_TYPE_MPLS_UCAST
def delete_flows(ofswitch, table_id, flow_ids):
    """Delete each of the given flow ids from `table_id` on `ofswitch`,
    printing a per-flow success or failure message."""
    for fid in flow_ids:
        status = ofswitch.delete_flow(table_id, fid).get_status()
        if status.eq(STATUS.OK):
            print ("<<< Flow with id of '%s' successfully removed "
                   "from the Controller" % fid)
        else:
            print ("!!!Flow '%s' removal error, reason: %s" %
                   (fid, status.brief()))
def of_demo_42():
f = "cfg.yml"
d = {}
if(load_dict_from_file(f, d) is False):
print("Config file '%s' read error: " % f)
exit(0)
try:
ctrlIpAddr = d['ctrlIpAddr']
ctrlPortNum = d['ctrlPortNum']
ctrlUname = d['ctrlUname']
ctrlPswd = d['ctrlPswd']
nodeName = d['nodeName']
rundelay = d['rundelay']
except:
print ("Failed to get Controller device attributes")
exit(0)
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
print ("<<< Demo 42 Start")
print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<")
ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd)
ofswitch = OFSwitch(ctrl, nodeName)
print ("<<< 'Controller': %s, 'OpenFlow' switch: '%s'" %
(ctrlIpAddr, nodeName))
first_flow_id = 110
# ---------------------------------------------------
# First flow entry
# ---------------------------------------------------
table_id = 0
flow_id = first_flow_id
flow_name = "Modify MPLS TTL example1"
priority = 900
cookie = 1300
match_in_port = 3
match_eth_type = ETH_TYPE_MPLS_UCAST
match_mpls_label = 567
act_mod_mpls_ttl = 2
act_out_port = 112
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" MPLS Label (%s)" %
(match_in_port,
hex(match_eth_type),
match_mpls_label))
print (" Actions: Set MPLS TTL (%s)\n"
" Output (%s)" %
(act_mod_mpls_ttl, act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry1 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry1.set_flow_table_id(table_id)
flow_entry1.set_flow_name(flow_name)
flow_entry1.set_flow_id(flow_id)
flow_entry1.set_flow_cookie(cookie)
flow_entry1.set_flow_priority(priority)
flow_entry1.set_flow_hard_timeout(0)
flow_entry1.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = SetMplsTTLAction(action_order)
action.set_ttl(act_mod_mpls_ttl)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry1.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_mpls_label(match_mpls_label)
flow_entry1.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry1.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry1)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
# ---------------------------------------------------
# Second flow entry
# ---------------------------------------------------
table_id = 0
flow_id += 1
flow_name = "Modify MPLS TTL example2"
priority = 900
cookie = 1300
match_in_port = 112
match_eth_type = ETH_TYPE_MPLS_UCAST
match_mpls_label = 567
act_out_port = 3
print "\n"
print ("<<< Set OpenFlow flow on the Controller")
print (" Match: Input Port (%s)\n"
" Ethernet Type (%s)\n"
" MPLS Label (%s)" %
(match_in_port,
hex(match_eth_type),
match_mpls_label))
print (" Actions: Decrement MPLS TTL\n"
" Output (%s)" %
(act_out_port))
time.sleep(rundelay)
# Allocate a placeholder for the Flow Entry
flow_entry2 = FlowEntry()
# Generic attributes of the Flow Entry
flow_entry2.set_flow_table_id(table_id)
flow_entry2.set_flow_name(flow_name)
flow_entry2.set_flow_id(flow_id)
flow_entry2.set_flow_cookie(cookie)
flow_entry2.set_flow_priority(priority)
flow_entry2.set_flow_hard_timeout(0)
flow_entry2.set_flow_idle_timeout(0)
# Instructions/Actions for the Flow Entry
instruction = Instruction(instruction_order=0)
action_order = 0
action = DecMplsTTLAction(action_order)
instruction.add_apply_action(action)
action_order += 1
action = OutputAction(action_order)
action.set_outport(act_out_port)
instruction.add_apply_action(action)
flow_entry2.add_instruction(instruction)
# Match Fields for the Flow Entry
match = Match()
match.set_in_port(match_in_port)
match.set_eth_type(match_eth_type)
match.set_mpls_label(match_mpls_label)
flow_entry2.add_match(match)
print ("\n")
print ("<<< Flow to send:")
print flow_entry2.get_payload()
time.sleep(rundelay)
result = ofswitch.add_modify_flow(flow_entry2)
status = result.get_status()
if(status.eq(STATUS.OK)):
print ("<<< Flow successfully added to the Controller")
else:
print ("\n")
print ("!!!Demo terminated, reason: %s" % status.detailed())
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
exit(0)
print ("\n")
print ("<<< Delete flows from the Controller's cache "
"and from the table '%s' on the '%s' node" % (table_id, nodeName))
time.sleep(rundelay)
delete_flows(ofswitch, table_id, range(first_flow_id, flow_id + 1))
print ("\n")
print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>")
print (">>> Demo |
# -*- coding: utf-8 -*-
'''
test_qgscomposerlabel.py
--------------------------------------
Date : Oct 2012
Copyright : (C) 2012 by Dr. Hugo Mercier
email : hugo dot mercier at oslandia dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* | it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
'''
import qgis |
import unittest
from utilities import getQgisTestApp, unitTestDataPath
from PyQt4.QtCore import QFileInfo, QDate, QDateTime
from qgis.core import QgsVectorLayer, QgsMapLayerRegistry, QgsMapRenderer, QgsComposition, QgsComposerLabel, QgsFeatureRequest, QgsFeature, QgsExpression
# Create the shared QGIS test application/canvas fixtures once at import time.
QGISAPP, CANVAS, IFACE, PARENT = getQgisTestApp()
class TestQgsComposerLabel(unittest.TestCase):
    """Exercises QgsComposerLabel text substitution: $CURRENT_DATE tokens,
    [% %] expressions, per-feature evaluation and per-page evaluation."""

    def testCase(self):
        # Build a composition over the france_parts sample layer, add one
        # label, then run the three evaluation scenarios against it.
        TEST_DATA_DIR = unitTestDataPath()
        vectorFileInfo = QFileInfo( TEST_DATA_DIR + "/france_parts.shp")
        mVectorLayer = QgsVectorLayer( vectorFileInfo.filePath(), vectorFileInfo.completeBaseName(), "ogr" )
        QgsMapLayerRegistry.instance().addMapLayers( [mVectorLayer] )
        # create composition with composer map
        mMapRenderer = QgsMapRenderer()
        layerStringList = []
        layerStringList.append( mVectorLayer.id() )
        mMapRenderer.setLayerSet( layerStringList )
        mMapRenderer.setProjectionsEnabled( False )
        mComposition = QgsComposition( mMapRenderer )
        mComposition.setPaperSize( 297, 210 )
        mLabel = QgsComposerLabel( mComposition )
        mComposition.addComposerLabel( mLabel )
        self.evaluation_test( mComposition, mLabel )
        self.feature_evaluation_test( mComposition, mLabel, mVectorLayer )
        self.page_evaluation_test( mComposition, mLabel, mVectorLayer )

    def evaluation_test( self, mComposition, mLabel ):
        """Token and expression substitution without any feature context."""
        # $CURRENT_DATE evaluation
        mLabel.setText( "__$CURRENT_DATE__" )
        assert mLabel.displayText() == ( "__" + QDate.currentDate().toString() + "__" )
        # $CURRENT_DATE() evaluation
        mLabel.setText( "__$CURRENT_DATE(dd)(ok)__" )
        expected = "__" + QDateTime.currentDateTime().toString( "dd" ) + "(ok)__"
        assert mLabel.displayText() == expected
        # $CURRENT_DATE() evaluation (inside an expression)
        mLabel.setText( "__[%$CURRENT_DATE(dd) + 1%](ok)__" )
        dd = QDate.currentDate().day()
        expected = "__%d(ok)__" % (dd+1)
        assert mLabel.displayText() == expected
        # expression evaluation (without associated feature)
        mLabel.setText( "__[%\"NAME_1\"%][%21*2%]__" )
        assert mLabel.displayText() == "__[NAME_1]42__"

    def feature_evaluation_test( self, mComposition, mLabel, mVectorLayer ):
        """Expressions evaluated against successive features of the layer."""
        provider = mVectorLayer.dataProvider()
        fi = provider.getFeatures( QgsFeatureRequest() )
        feat = QgsFeature()
        fi.nextFeature( feat )
        mLabel.setExpressionContext( feat, mVectorLayer )
        mLabel.setText( "[%\"NAME_1\"||'_ok'%]")
        assert mLabel.displayText() == "Basse-Normandie_ok"
        fi.nextFeature( feat )
        mLabel.setExpressionContext( feat, mVectorLayer )
        assert mLabel.displayText() == "Bretagne_ok"
        # evaluation with local variables
        locs = { "$test" : "OK" }
        mLabel.setExpressionContext( feat, mVectorLayer, locs )
        mLabel.setText( "[%\"NAME_1\"||$test%]" )
        assert mLabel.displayText() == "BretagneOK"

    def page_evaluation_test( self, mComposition, mLabel, mVectorLayer ):
        """$page/$numpages tokens and special columns."""
        mComposition.setNumPages( 2 )
        mLabel.setText( "[%$page||'/'||$numpages%]" )
        assert mLabel.displayText() == "1/2"
        # move to the second page and re-evaluate
        mLabel.setItemPosition( 0, 320 )
        assert mLabel.displayText() == "2/2"
        # use setSpecialColumn
        mLabel.setText( "[%$var1 + 1%]" )
        QgsExpression.setSpecialColumn( "$var1", 41 )
        assert mLabel.displayText() == "42"
        QgsExpression.setSpecialColumn( "$var1", 99 )
        assert mLabel.displayText() == "100"
        QgsExpression.unsetSpecialColumn( "$var1" )
        assert mLabel.displayText() == "[%$var1 + 1%]"
# Allow running this test module directly with the stock unittest runner.
if __name__ == '__main__':
    unittest.main()
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'lassoui.ui'
#
# Created: Sat Apr 11 09:14:27 2015
# by: PyQt4 UI code generator 4.10.4
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
# pyuic4 compatibility shims: on older PyQt4 API levels QString.fromUtf8 and
# QApplication.UnicodeUTF8 exist; on newer (API v2 / Python 3) they do not,
# so fall back to identity conversion and the two-argument translate().
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    def _fromUtf8(s):
        return s
try:
    _encoding = QtGui.QApplication.UnicodeUTF8
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
    def _translate(context, text, disambig):
        return QtGui.QApplication.translate(context, text, disambig)
class Ui_Form(object):
    """Auto-generated (pyuic4) UI builder for the LASSO(L1) regressor form.

    NOTE: this class is generated from 'lassoui.ui'; hand edits are lost on
    regeneration -- change the .ui file in Qt Designer instead.
    """
    def setupUi(self, Form):
        # Widget construction and fixed geometry, straight from the .ui file.
        Form.setObjectName(_fromUtf8("Form"))
        Form.resize(235, 342)
        self.groupBox = QtGui.QGroupBox(Form)
        self.groupBox.setGeometry(QtCore.QRect(10, 10, 211, 61))
        self.groupBox.setObjectName(_fromUtf8("groupBox"))
        self.lineEdit = QtGui.QLineEdit(self.groupBox)
        self.lineEdit.setGeometry(QtCore.QRect(40, 20, 141, 20))
        self.lineEdit.setObjectName(_fromUtf8("lineEdit"))
        self.groupBox_2 = QtGui.QGroupBox(Form)
        self.groupBox_2.setGeometry(QtCore.QRect(10, 70, 211, 171))
        self.groupBox_2.setObjectName(_fromUtf8("groupBox_2"))
        self.label = QtGui.QLabel(self.groupBox_2)
        self.label.setGeometry(QtCore.QRect(50, 20, 111, 16))
        self.label.setObjectName(_fromUtf8("label"))
        self.doubleSpinBox = QtGui.QDoubleSpinBox(self.groupBox_2)
        self.doubleSpinBox.setGeometry(QtCore.QRect(110, 20, 62, 22))
        self.doubleSpinBox.setObjectName(_fromUtf8("doubleSpinBox"))
        self.label_2 = QtGui.QLabel(self.groupBox_2)
        self.label_2.setGeometry(QtCore.QRect(30, 60, 111, 16))
        self.label_2.setObjectName(_fromUtf8("label_2"))
        self.spinBox = QtGui.QSpinBox(self.groupBox_2)
        self.spinBox.setGeometry(QtCore.QRect(110, 60, 61, 22))
        self.spinBox.setMaximum(10000000)
        self.spinBox.setObjectName(_fromUtf8("spinBox"))
        self.checkBox = QtGui.QCheckBox(self.groupBox_2)
        self.checkBox.setGeometry(QtCore.QRect(30, 90, 81, 17))
        self.checkBox.setObjectName(_fromUtf8("checkBox"))
        self.checkBox_2 = QtGui.QCheckBox(self.groupBox_2)
        self.checkBox_2.setGeometry(QtCore.QRect(120, 90, 121, 17))
        self.checkBox_2.setObjectName(_fromUtf8("checkBox_2"))
        self.checkBox_3 = QtGui.QCheckBox(self.groupBox_2)
        self.checkBox_3.setGeometry(QtCore.QRect(30, 120, 81, 17))
        self.checkBox_3.setObjectName(_fromUtf8("checkBox_3"))
        self.pushButton_3 = QtGui.QPushButton(Form)
        self.pushButton_3.setGeometry(QtCore.QRect(40, 280, 161, 23))
        self.pushButton_3.setObjectName(_fromUtf8("pushButton_3"))
        self.pushButton = QtGui.QPushButton(Form)
        self.pushButton.setGeometry(QtCore.QRect(40, 250, 161, 23))
        self.pushButton.setObjectName(_fromUtf8("pushButton"))
        self.retranslateUi(Form)
        QtCore.QMetaObject.connectSlotsByName(Form)
    def retranslateUi(self, Form):
        # Human-visible strings, kept separate for translation support.
        Form.setWindowTitle(_translate("Form", "Form", None))
        self.groupBox.setTitle(_translate("Form", "Regressor Name", None))
        self.lineEdit.setText(_translate("Form", "LASSO(L1)", None))
        self.groupBox_2.setTitle(_translate("Form", "Options", None))
        self.label.setText(_translate("Form", "Alpha", None))
        self.label_2.setText(_translate("Form", "Max iterations", None))
        self.checkBox.setText(_translate("Form", " Normalise", None))
        self.checkBox_2.setText(_translate("Form", "Positive", None))
        self.checkBox_3.setText(_translate("Form", "Fit intercept", None))
        self.pushButton_3.setText(_translate("Form", "Start", None))
        self.pushButton.setText(_translate("Form", "Input File", None))
|
from lxml import html
def main():
    """Fetch an Amazon product page and print the product title.

    NOTE(review): screen-scraping like this is brittle; Amazon frequently
    changes its markup and serves bot-detection pages with no productTitle.
    """
    dom = html.parse(('http://www.amazon.com/Apple-MH0W2LL-10-Inch-Retina-'
                      'Display/dp/B00OTWOAAQ/ref=sr_1_1?s=pc&ie=UTF8&'
                      'qid=1459799371&sr=1-1&keywords=ipad'))
    title = dom.find('//*[@id="productTitle"]')
    # BUGFIX: find() returns None when the element is missing (layout change,
    # captcha page), which previously crashed with AttributeError on .text.
    if title is None:
        print('productTitle element not found')
    else:
        print(title.text)
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
#
# pKaTool - analysis of systems of titratable groups
# Copyright (C) 2010 Jens Erik Nielsen
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Contact information:
# Email: Jens.Nielsen_at_gmail.com |
# Normal mail:
# Jens Nielsen
# SBBS, Conway I | nstitute
# University College Dublin
# Dublin 4, Ireland
class titration_curve:
    """Holds titration curves for a set of titratable groups.

    `curves` maps group name -> {pH value: charge/protonation value}.
    The metadata keys 'pKa'/'pka' sometimes riding along with curve data
    are stripped on construction.
    """

    def __init__(self, curves):
        # Copy so the caller's dict is never mutated.
        self.curves = curves.copy()
        not_allowed = ['pKa', 'pka']
        for key in not_allowed:
            if key in self.curves:
                del self.curves[key]
        return
    #
    # ----
    #
    def __sub__(self, other):
        """Summed absolute difference between two titration curve sets.

        Groups or pH points present in only one of the two sets are skipped.
        """
        diff = 0.0
        for group in self.curves:
            if group not in other.curves:
                continue
            for ph in self.curves[group]:
                if ph in other.curves[group]:
                    diff = diff + abs(self.curves[group][ph] - other.curves[group][ph])
        return diff
    #
    # ----
    #
    def subtract_individually(self, other):
        """Per-group absolute differences; 0.0 for groups missing in other.

        Returns a list ordered like iteration over self.curves.
        """
        diff = []
        for group in self.curves:
            if group not in other.curves:
                diff.append(0.0)
                continue
            this_diff = 0.0
            for ph in self.curves[group]:
                if ph in other.curves[group]:
                    this_diff = this_diff + abs(self.curves[group][ph] - other.curves[group][ph])
            diff.append(this_diff)
        return diff
    #
    # ----
    #
    def sub_scaled(self, other):
        """Scaled difference between two titration curve sets.

        Raises ValueError when a group of self is missing from other.
        BUGFIX: the original placed the raise after `continue`, so it raised
        a *string* exception for every group the two sets had in common,
        making the method unusable; the message ('incompatible titration
        curves') shows the check was meant for missing groups.
        """
        diff = 0.0
        for group in self.curves:
            if group not in other.curves:
                raise ValueError('incompatible titration curves')
            for ph in self.curves[group]:
                if ph in other.curves[group]:
                    diff = diff + self.scale(self.curves[group][ph], other.curves[group][ph]) * abs(self.curves[group][ph] - other.curves[group][ph])
        return diff
    #
    # ----
    #
    def scale(self, frac1, frac2):
        """Scale factor for a titration point: the larger of the two
        per-value weights."""
        return max(self.scale_function(frac1), self.scale_function(frac2))
    #
    # ----
    #
    def scale_function(self, x):
        """Weighting function: an inverted parabola, 0 at x=0, 1 at |x|=1."""
        return -pow(abs(x) - 1, 2) + 1
    #
    # ----
    #
    def experimental_uncertainty(self, pH_uncertainty=0.1):
        """Estimate the experimental uncertainty of the titration curves.

        Averages the backward/forward finite-difference slopes at each pH
        point, scaled by `pH_uncertainty`. Returns (total, per-point average).
        BUGFIX: at the first point the original read pHs[i-1] == pHs[-1]
        (negative indexing wraps), silently mixing the last point's value
        into the first; boundary points now use only the one-sided slope.
        """
        res = 0.0
        count = 0
        for group in self.curves:
            curve = self.curves[group]
            # make sure that pH values are sorted
            pHs = sorted(curve)
            for i in range(len(pHs)):
                bw_diff = 0
                fw_diff = 0
                if i > 0:
                    bw_diff = (curve[pHs[i]] - curve[pHs[i - 1]]) / (pHs[i] - pHs[i - 1])
                if i < len(pHs) - 1:
                    fw_diff = (curve[pHs[i + 1]] - curve[pHs[i]]) / (pHs[i + 1] - pHs[i])
                avr_diff = (bw_diff + fw_diff) / 2.0
                res += avr_diff
                count += 1
        res *= pH_uncertainty
        res = abs(res)
        avr_res = res / float(count)
        return res, avr_res
    #
    # ----
    #
    def sub_HHd_scaled(self, exp_data, pkas):
        """Error between self and exp_data, weighting each point by the
        experimental data's deviation from the Henderson-Hasselbalch eq."""
        diff = 0.0
        scales = exp_data.deviation_from_henderson_hasselbalch(pkas)
        for group in self.curves:
            if group not in exp_data.curves:
                continue
            for ph in self.curves[group]:
                if ph in exp_data.curves[group]:
                    diff = diff + scales[group][ph] * abs(self.curves[group][ph] - exp_data.curves[group][ph])
        return diff
    #
    # -----
    #
    def deviation_from_henderson_hasselbalch(self, pKas):
        """Per-point absolute deviation from the ideal Henderson-Hasselbalch
        curve (acid charge: 1/(1+10^(pH-pKa)) - 1) for groups with a pKa.

        Returns {group: {pH: deviation}}.
        """
        HH_deviation = {}
        for group in self.curves:
            if group not in pKas:
                continue
            pka = pKas[group]
            HH_deviation[group] = {}
            for ph in self.curves[group]:
                try:
                    expected = 1.0 / (1.0 + pow(10, float(ph) - float(pka))) - 1.0
                    HH_deviation[group][ph] = abs(expected - float(self.curves[group][ph]))
                except (TypeError, ValueError, OverflowError):
                    # Skip points that cannot be evaluated numerically
                    # (non-numeric data or float overflow in pow), as the
                    # original bare except did.
                    pass
        return HH_deviation
|
### Author: Bert de Bruijn <bert+dstat$debruijn,be>
### VMware ESX kernel vmknic stats
### Displays VMkernel port statistics on VMware ESX servers
# NOTE TO USERS: command-line plugin configuration is not yet possible, so I've
# "borrowed" the -N argument.
# EXAMPLES:
# # dstat --vmknic -N vmk1
# You can even combine the Linux and VMkernel network stats (just don't use "total").
# # dstat --vmknic -n -N vmk0,vswif0
# NB Data comes from /proc/vmware/net/tcpip/ifconfig
class dstat_plugin(dstat):
    """dstat plugin reporting VMkernel NIC receive/send byte rates parsed
    from /proc/vmware/net/tcpip/ifconfig on VMware ESX hosts.

    NOTE(review): names like `op`, `update`, `elapsed` and `step` are not
    defined here; presumably they are injected by the dstat runtime, which
    executes plugins inside its own namespace -- confirm against dstat core.
    """
    def __init__(self):
        self.name = 'vmknic'
        self.nick = ('recv', 'send')
        # self.open comes from the dstat base class and populates self.fd.
        self.open('/proc/vmware/net/tcpip/ifconfig')
        self.cols = 2
    def check(self):
        # Only usable when the VMware ESX procfs tree exists.
        try:
            os.listdir('/proc/vmware')
        except:
            raise Exception('Needs VMware ESX')
        info(1, 'The vmknic module is an EXPERIMENTAL module.')
    def discover(self, *list):
        # Parse ifconfig-style output, skipping headers, link lines, the
        # loopback device and usage text; returns sorted NIC names plus any
        # extra names passed by the caller.
        # NOTE(review): splitlines(replace=..., delim=...) is dstat's custom
        # file-wrapper API, not the builtin str method -- confirm.
        ret = []
        for l in self.fd[0].splitlines(replace=' /', delim='/'):
            if len(l) != 12: continue
            if l[2][:5] == '<Link': continue
            if ','.join(l) == 'Name,Mtu/TSO,Network,Address,Ipkts,Ierrs,Ibytes,Opkts,Oerrs,Obytes,Coll,Time': continue
            if l[0] == 'lo0': continue
            if l[0] == 'Usage:': continue
            ret.append(l[0])
        ret.sort()
        for item in list: ret.append(item)
        return ret
    def vars(self):
        # Select which NICs to display: -N list if given, else discovered.
        # NOTE(review): `self.discover` is referenced without parentheses
        # here and below, so `list.sort()` would operate on a bound method;
        # dstat may convert these to properties at load time -- confirm
        # against dstat core before "fixing".
        ret = []
        if op.netlist:
            list = op.netlist
        else:
            list = self.discover
        list.sort()
        for name in list:
            if name in self.discover + ['total']:
                ret.append(name)
        return ret
    def name(self):
        # Column headers; shadows the self.name attribute set in __init__,
        # which is the established dstat plugin convention.
        return ['net/'+name for name in self.vars]
    def extract(self):
        # Accumulate per-NIC (Ibytes, Obytes) counters and a grand total,
        # then convert counter deltas to per-second rates.
        self.set2['total'] = [0, 0]
        for line in self.fd[0].readlines():
            l = line.replace(' /','/').split()
            if len(l) != 12: continue
            if l[2][:5] == '<Link': continue
            if ','.join(l) == 'Name,Mtu/TSO,Network,Address,Ipkts,Ierrs,Ibytes,Opkts,Oerrs,Obytes,Coll,Time': continue
            if l[0] == 'Usage:': continue
            name = l[0]
            if name in self.vars:
                self.set2[name] = ( int(l[6]), int(l[9]) )
            if name != 'lo0':
                self.set2['total'] = ( self.set2['total'][0] + int(l[6]), self.set2['total'][1] + int(l[9]) )
        if update:
            for name in self.set2:
                self.val[name] = list(map(lambda x, y: (y - x) * 1.0 / elapsed, self.set1[name], self.set2[name]))
        if step == op.delay:
            self.set1.update(self.set2)
# vim:ts=4:sw=4
|
__autho | r__ | = 'benji'
|
#!/usr/bin/env python
import sys
import re
import getopt
from typing import List, Tuple
from feed_maker_util import IO
def main() -> int:
    """Extract (link, title) pairs from a board HTML page read on stdin and
    print them as feed lines, numbered newest-first and capped by -n."""
    link: str = ""
    title: str = ""
    url_prefix = ""
    state = 0
    num_of_recent_feeds = 1000
    # -f is accepted for command-line compatibility but not used here.
    options, _ = getopt.getopt(sys.argv[1:], "f:n:")
    for opt, arg in options:
        if opt == '-n':
            num_of_recent_feeds = int(arg)
    collected: List[Tuple[str, str]] = []
    for line in IO.read_stdin_as_line_list():
        if state == 0:
            # Wait for the site URL prefix declared in an inline script.
            match = re.search(r'var\s+g5_url\s+=\s+"(?P<url_prefix>[^"]+)";', line)
            if match:
                url_prefix = match.group("url_prefix")
                state = 1
        elif state == 1:
            # Look for the table cell carrying the article link.
            match = re.search(r'<td[^>]*name="view_list"[^>]*data-role="(?P<link>[^"]+)">', line)
            if match:
                link = match.group("link")
                link = re.sub(r'&', '&', link)
                link = url_prefix + link
                state = 2
        elif state == 2:
            # The title cell follows the link cell.
            if re.search(r'<td[^>]*class="content__title"', line):
                state = 3
        elif state == 3:
            match = re.search(r'\s*(?P<title>\S[^<>]*)(?:</td>)?\s*', line)
            if match:
                title = match.group("title")
                collected.append((link, title))
                # Go back to looking for the next article's link.
                state = 1
    counter = len(collected)
    for (link, title) in collected[:num_of_recent_feeds]:
        print("%s\t%03d. %s" % (link, counter, title))
        counter = counter - 1
    return 0
if __name__ == "__main__":
sys.exit(main())
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from shuup.xtheme.editing import could_edit, is_edit_mode, set_edit_mode
from shuup_tests.utils.faux_users import SuperUser
def test_edit_priv(rf):
    """A superuser can edit, and edit mode toggles on/off via set_edit_mode."""
    req = rf.get("/")
    req.user = SuperUser()
    req.session = {}
    # Eligible to edit, but not in edit mode until explicitly enabled.
    assert could_edit(req)
    assert not is_edit_mode(req)
    set_edit_mode(req, True)
    assert is_edit_mode(req)
    set_edit_mode(req, False)
    assert not is_edit_mode(req)
|
else:
dxpy.utils.describe.print_desc(dxworkflow.describe(incl_properties=True, incl_details=True),
args.verbose)
except:
err_exit()
def get_workflow_id_and_project(path):
    '''
    :param path: a path or ID to a workflow object
    :type path: string
    :returns: tuple of (workflow ID, project ID)

    Returns the workflow and project IDs from the given path if
    available; otherwise, exits with an appropriate error message.
    '''
    project, _folderpath, entity_result = try_call(resolve_existing_path, path, expected='entity')
    try:
        if entity_result is None or not entity_result['id'].startswith('workflow-'):
            raise DXCLIError('Could not resolve "' + path + '" to a workflow object')
    except Exception:
        # BUGFIX: narrowed from a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt. err_exit() reports the active
        # exception and exits the CLI.
        err_exit()
    return entity_result['id'], project
def add_stage(args):
    """Implements 'dx add stage': append an executable as a new stage of a
    workflow, with optional name, output folder, bound inputs and instance
    type. Prints the new stage ID (--brief) or the workflow describe."""
    # get workflow
    workflow_id, project = get_workflow_id_and_project(args.workflow)
    # get executable
    exec_handler = try_call(dxpy.utils.resolver.get_exec_handler,
                            args.executable,
                            args.alias)
    exec_inputs = dxpy.cli.exec_io.ExecutableInputs(exec_handler)
    try_call(exec_inputs.update_from_args, args, require_all_inputs=False)
    # get folder path: prefer a resolved absolute path, else the raw value,
    # else the workflow-relative folder
    folderpath = None
    if args.output_folder is not None:
        try:
            _ignore, folderpath, _none = resolve_path(args.output_folder, expected='folder')
        except Exception:
            # BUGFIX: narrowed from a bare "except:"; when the path cannot
            # be resolved to an existing folder, use the value verbatim.
            folderpath = args.output_folder
    elif args.relative_output_folder is not None:
        folderpath = args.relative_output_folder
    # process instance type (normalizes args.instance_type in place)
    try_call(process_instance_type_arg, args)
    dxworkflow = dxpy.DXWorkflow(workflow_id, project=project)
    stage_id = try_call(dxworkflow.add_stage,
                        exec_handler,
                        name=args.name,
                        folder=folderpath,
                        stage_input=exec_inputs.inputs,
                        instance_type=args.instance_type)
    if args.brief:
        print(stage_id)
    else:
        dxpy.utils.describe.print_desc(dxworkflow.describe())
def list_stages(args):
    """Implements 'dx list stages': print the workflow's title, output
    folder and, for each stage, its name, executable, output folder and
    bound inputs."""
    # get workflow
    workflow_id, project = get_workflow_id_and_project(args.workflow)
    dxworkflow = dxpy.DXWorkflow(workflow_id, project=project)
    desc = dxworkflow.describe()
    # Header: workflow name and ID in bold green.
    print((printing.BOLD() + printing.GREEN() + '{name}' + printing.ENDC() + ' ({id})').format(**desc))
    print()
    print('Title: ' + desc['title'])
    print('Output Folder: ' + (desc.get('outputFolder') if desc.get('outputFolder') is not None else '-'))
    if len(desc['stages']) == 0:
        print()
        print(' No stages; add stages with the command "dx add stage"')
    for i, stage in enumerate(desc['stages']):
        # Inject the index so it can be used in the format string below.
        stage['i'] = i
        print()
        if stage['name'] is None:
            stage['name'] = '<no name>'
        print((printing.UNDERLINE() + 'Stage {i}' + printing.ENDC() + ': {name} ({id})').format(**stage))
        # Mark executables the user cannot access in red.
        print('Executable {executable}'.format(**stage) + \
              (" (" + printing.RED() + "inaccessible" + printing.ENDC() + ")" \
               if stage.get('accessible') is False else ""))
        # Absolute stage folders are shown verbatim; relative ones are shown
        # under the workflow's output folder.
        if stage['folder'] is not None and stage['folder'].startswith('/'):
            stage_output_folder = stage['folder']
        else:
            stage_output_folder = '<workflow output folder>/' + (stage['folder'] if stage['folder'] is not None else "")
        print('Output Folder {folder}'.format(folder=stage_output_folder))
        if "input" in stage and stage["input"]:
            print('Bound input ' + \
                  ('\n' + ' '*16).join([
                      '{key}={value}'.format(key=key, value=io_val_to_str(stage["input"][key])) for
                      key in stage['input']
                  ]))
def remove_stage(args):
    """Implements 'dx remove stage': remove a stage (addressed by numeric
    index, stage ID or name) from a workflow and report the removed ID."""
    # get workflow
    workflow_id, project = get_workflow_id_and_project(args.workflow)
    # Stages may be addressed by numeric index; leave stage IDs/names as-is.
    try:
        args.stage = int(args.stage)
    except (ValueError, TypeError):
        # BUGFIX: narrowed from a bare "except:"; only conversion failures
        # should be ignored here.
        pass
    dxworkflow = dxpy.DXWorkflow(workflow_id, project=project)
    stage_id = try_call(dxworkflow.remove_stage, args.stage)
    if args.brief:
        print(stage_id)
    else:
        print("Removed stage " + stage_id)
def update_workflow(args):
    """Implements 'dx update workflow': update title/summary/description/
    output folder metadata. Prints a notice and returns when no update
    flags were supplied."""
    # get workflow
    workflow_id, project = get_workflow_id_and_project(args.workflow)
    if not any([args.title, args.no_title, args.summary, args.description, args.output_folder,
                args.no_output_folder]):
        print('No updates requested; none made')
        return
    if args.output_folder is not None:
        try:
            # Try to resolve to an existing path in the project
            _ignore, args.output_folder, _ignore = resolve_path(args.output_folder, expected='folder')
        except Exception:
            # But if not, just use the value directly.
            # BUGFIX: narrowed from a bare "except:", which would also trap
            # SystemExit/KeyboardInterrupt.
            pass
    dxworkflow = dxpy.DXWorkflow(workflow_id, project=project)
    try_call(dxworkflow.update,
             title=args.title,
             unset_title=args.no_title,
             summary=args.summary,
             description=args.description,
             output_folder=args.output_folder,
             unset_output_folder=args.no_output_folder)
def update_stage(args):
# get workflow
workflow_id, project = get_workflow_id_and_project(args.workflow)
dxworkflow = dxpy.DXWorkflow(workflow_id, project=project)
# process instance type
try_call(process_instance_type_arg, args)
initial_edit_version = dxworkflow.editVersion
try:
args.stage = int(args.stage)
except:
pass
if not any([args.executable, args.name, args.no_name, args.output_folder,
args.relative_output_folder, args.input, args.input_json, args.filename,
args.instance_type]):
print('No updates requested; none made')
return
new_exec_handler = None
if args.executable is not None:
# get executable
new_exec_handler = try_call(dxpy.utils.resolver.get_exec_handler,
args.executable,
| args.alias)
exec_inputs = dxpy.cli.exec_io.ExecutableInputs(new_exec_handler)
try_call(exec_inputs.update_from_args, args, require_all_inputs=False)
stage_input = exec_inputs.inputs
elif args.input or args.input_json or args.filename:
# input is updated, so look up the existing one
existing_exec_handler = dxpy.utils.resolve | r.get_exec_handler(dxworkflow.get_stage(args.stage)['executable'])
exec_inputs = dxpy.cli.exec_io.ExecutableInputs(existing_exec_handler)
try_call(exec_inputs.update_from_args, args, require_all_inputs=False)
stage_input = exec_inputs.inputs
else:
stage_input = None
# get folder path
folderpath = None
if args.output_folder is not None:
try:
_ignore, folderpath, _none = resolve_path(args.output_folder, expected='folder')
except:
folderpath = args.output_folder
elif args.relative_output_folder is not None:
folderpath = args.relative_output_folder
try:
dxworkflow.update_stage(args.stage,
executable=new_exec_handler,
force=args.force,
name=args.name,
unset_name=args.no_name,
folder=folderpath,
stage_input=stage_input,
instance_type=args.instance_type,
edit_version=initial_edit_version)
except InvalidState as e:
if "compatible" in str(e):
err_msg = 'The requested executable could not be verified as a compatible replacement'
if 'incompatibilities' in e.details and e.details['incompatibilities']:
err_msg += ' for the following reasons:\n'
err_msg += '\n'.join([printing.fill(incompat,
initial_indent='- ',
subsequent_ind |
import itertools
import numpy as np
from vanilla_neural_nets.base.loss_function import BaseLossFunction
class CrossEntropyLoss(BaseLossFunction):
    """Categorical cross-entropy over integer class labels.

    `y_predicted` is an (n_samples, n_classes) array of predicted
    probabilities; `y_true` holds the integer class index of each sample.
    """

    @classmethod
    def loss(cls, y_true, y_predicted):
        """Mean negative log-likelihood per sample."""
        return cls.total_loss(y_true=y_true, y_predicted=y_predicted) / len(y_true)

    @classmethod
    def total_loss(cls, y_true, y_predicted):
        """Summed negative log-likelihood of the true classes."""
        sample_indices = np.arange(len(y_true))
        # Pick each sample's predicted probability for its true class.
        true_class_probabilities = y_predicted[sample_indices, y_true]
        return np.sum(-np.log(true_class_probabilities))

    @classmethod
    def derivative_of_loss_function(cls, y_true, y_predicted):
        # Intentionally unimplemented; gradients are computed elsewhere.
        pass
|
assert_equal(len(v_descendants), len(chain)-1)
for x in v_descendants.keys():
assert_equal(mempool[x], v_descendants[x])
assert chain[0] not in v_descendants.keys()
# Check that ancestor modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=1000)
ancestor_fees = 0
for x in chain:
entry = self.nodes[0].getmempoolentry(x)
ancestor_fees += entry['fee']
assert_equal(entry['fees']['ancestor'], ancestor_fees + Decimal('0.00001'))
assert_equal(entry['ancestorfees'], ancestor_fees * COIN + 1000)
# Undo the prioritisetransaction for later tests
self.nodes[0].prioritisetransaction(txid=chain[0], fee_delta=-1000)
# Check that descendant modified fees includes fee deltas from
# prioritisetransaction
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=1000)
descendant_fees = 0
for x in reversed(chain):
entry = self.nodes[0].getmempoolentry(x)
descendant_fees += entry['fee']
assert_equal(entry['fees']['descendant'], descendant_fees + Decimal('0.00001'))
assert_equal(entry['descendantfees'], descendant_fees * COIN + 1000)
# Adding one more transaction on to the chain should fail.
assert_raises_rpc_error(-26, "too-long-mempool-chain", chain_transaction, self.nodes[0], [txid], [vout], value, fee, 1)
# Check that prioritising a tx before it's added to the mempool works
# First clear the mempool by mining a block.
self.generate(self.nodes[0], 1)
self.sync_blocks()
assert_equal(len(self.nodes[0].getrawmempool()), 0)
# Prioritise a transaction that has been mined, then add it back to the
# mempool by using invalidateblock.
self.nodes[0].prioritisetransaction(txid=chain[-1], fee_delta=2000)
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
# Keep node1's tip synced with node0
self.nodes[1].invalidateblock(self.nodes[1].getbestblockhash())
# Now check that the transaction is in the mempool, with the right modified fee
descendant_fees = 0
for x in reversed(chain):
entry = self.nodes[0].getmempoolentry(x)
descendant_fees += entry['fee']
if (x == chain[-1]):
assert_equal(entry['modifiedfee'], entry['fee'] + Decimal("0.00002"))
assert_equal(entry['fees']['modified'], entry['fee'] + Decimal("0.00002"))
assert_equal(entry['descendantfees'], descendant_fees * COIN + 2000)
assert_equal(entry['fees']['descendant'], descendant_fees + Decimal("0.00002"))
# Check that node1's mempool is as expected (-> custom ancestor limit)
mempool0 = self.nodes[0].getrawmempool(False)
mempool1 = self.nodes[1].getrawmempool(False)
assert_equal(len(mempool1), MAX_ANCESTORS_CUSTOM)
assert set(mempool1).issubset(set(mempool0))
for tx in chain[:MAX_ANCESTORS_CUSTOM]:
assert tx in mempool1
# TODO: more detailed check of node1's mempool (fees etc.)
# check transaction unbroadcast info (should be false if in both mempools)
mempool = self.nodes[0].getrawmempool(True)
for tx in mempool:
assert_equal(mempool[tx]['unbroadcast'], False)
# TODO: test ancestor size limits
# Now test descendant chain limits
txid = utxo[1]['txid']
value = utxo[1]['amount']
vout = utxo[1]['vout']
transaction_package = []
tx_children = []
# First create one parent tx with 10 children
(txid, sent_value) = chain_transaction(self.nodes[0], [txid], [vout], value, fee, 10)
parent_transaction = txid
for i in range(10):
transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value})
# Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
chain = [] # save sent txs for the purpose of checking node1's mempool later (see below)
for _ in rang | e(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = chain_transaction(self.nodes[0], [utxo['txid']], [utxo['vout']], utxo['amount'], fee, 10)
chain.append(txid)
if utxo['txid'] is parent_transaction:
tx_children.append(txid)
for j in range(10):
transaction_package.append({'txid': txid, 'vout': j, 'amount': sent_value})
mempool = self. | nodes[0].getrawmempool(True)
assert_equal(mempool[parent_transaction]['descendantcount'], MAX_DESCENDANTS)
assert_equal(sorted(mempool[parent_transaction]['spentby']), sorted(tx_children))
for child in tx_children:
assert_equal(mempool[child]['depends'], [parent_transaction])
# Sending one more chained transaction will fail
utxo = transaction_package.pop(0)
assert_raises_rpc_error(-26, "too-long-mempool-chain", chain_transaction, self.nodes[0], [utxo['txid']], [utxo['vout']], utxo['amount'], fee, 10)
# Check that node1's mempool is as expected, containing:
# - txs from previous ancestor test (-> custom ancestor limit)
# - parent tx for descendant test
# - txs chained off parent tx (-> custom descendant limit)
self.wait_until(lambda: len(self.nodes[1].getrawmempool()) ==
MAX_ANCESTORS_CUSTOM + 1 + MAX_DESCENDANTS_CUSTOM, timeout=10)
mempool0 = self.nodes[0].getrawmempool(False)
mempool1 = self.nodes[1].getrawmempool(False)
assert set(mempool1).issubset(set(mempool0))
assert parent_transaction in mempool1
for tx in chain[:MAX_DESCENDANTS_CUSTOM]:
assert tx in mempool1
for tx in chain[MAX_DESCENDANTS_CUSTOM:]:
assert tx not in mempool1
# TODO: more detailed check of node1's mempool (fees etc.)
# TODO: test descendant size limits
# Test reorg handling
# First, the basics:
self.generate(self.nodes[0], 1)
self.sync_blocks()
self.nodes[1].invalidateblock(self.nodes[0].getbestblockhash())
self.nodes[1].reconsiderblock(self.nodes[0].getbestblockhash())
# Now test the case where node1 has a transaction T in its mempool that
# depends on transactions A and B which are in a mined block, and the
# block containing A and B is disconnected, AND B is not accepted back
# into node1's mempool because its ancestor count is too high.
# Create 8 transactions, like so:
# Tx0 -> Tx1 (vout0)
# \--> Tx2 (vout1) -> Tx3 -> Tx4 -> Tx5 -> Tx6 -> Tx7
#
# Mine them in the next block, then generate a new tx8 that spends
# Tx1 and Tx7, and add to node1's mempool, then disconnect the
# last block.
# Create tx0 with 2 outputs
utxo = self.nodes[0].listunspent()
txid = utxo[0]['txid']
value = utxo[0]['amount']
vout = utxo[0]['vout']
send_value = (value - fee) / 2
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
for _ in range(2):
outputs[self.nodes[0].getnewaddress()] = send_value
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
txid = self.nodes[0].sendrawtransaction(signedtx['hex'])
tx0_id = txid
value = send_value
# Create tx1
tx1_id, _ = chain_transaction(self.nodes[0], [tx0_id], [0], value, fee, 1)
# Create tx2-7
vout = 1
txid = tx0_id
for _ in range(6):
(txid, sent_value) = chain_transaction(self.nodes[0], [txid], [vout], value, fee, 1)
vout = 0
value = sent_value
# Mine these in a block
self.generate(self.nodes[0], 1)
self.sync_all()
# Now generate tx8, with a big fee
inputs = [ |
from matplotlib import pyplot as plt
import numpy as np
class Spline(object):
    """Cubic Hermite spline on [x1, x2] matching the given values and
    first derivatives at both endpoints of that interval."""

    def __init__(self, x1, y1, dy1, x2, y2, dy2):
        self.x1 = x1
        self.x2 = x2
        self.y1 = y1
        self.y2 = y2
        self.dy1 = dy1
        self.dy2 = dy2

    def T(self, x):
        """Map x onto the normalized coordinate t in [0, 1]."""
        return (x - self.x1) / (self.x2 - self.x1)

    def Value(self, x):
        """Interpolated value at x (Hermite basis form)."""
        t = self.T(x)
        width = self.x2 - self.x1
        left_part = (1 - t) ** 2 * ((1 + 2 * t) * self.y1 + t * width * self.dy1)
        right_part = t ** 2 * ((3 - 2 * t) * self.y2 + (t - 1) * width * self.dy2)
        return left_part + right_part

    def Derivative(self, x):
        """Derivative dy/dx at x, via the chain rule through t."""
        t = self.T(x)
        width = self.x2 - self.x1
        dydt = ((6 * t ** 2 - 6 * t) * self.y1
                + (3 * t ** 2 - 4 * t + 1) * width * self.dy1
                + (-6 * t ** 2 + 6 * t) * self.y2
                + (3 * t ** 2 - 2 * t) * width * self.dy2)
        return dydt * (1. / width)
class VanGenuchten(object):
    """van Genuchten water-retention model with Mualem relative permeability.

    Optionally smooths the kr curve near full saturation and the
    saturation curve near zero capillary pressure with cubic splines.
    Parameters: alpha [1/Pa], n [-], residual saturation sr [-], exponent
    l (default 0.5), and the two smoothing interval widths (0 disables).
    """
    def __init__( self, alpha, n, sr, l=0.5, smoothing_interval_sat=0.0, smoothing_interval_p=0.0 ):
        self._alpha = alpha
        self._n = n
        self._sr = sr
        self._l = l
        self._m = 1 - 1.0/n
        # smoothing for sat: on [s0, 1] replace kr by a spline that joins
        # kr smoothly at s0 and reaches (1, 1) with zero slope.
        self._s0 = 1.0 - smoothing_interval_sat
        if self._s0 < 1.:
            self._spline = Spline(self._s0, self.k_relative(self._s0), self.d_k_relative(self._s0),
                                  1.0, 1.0, 0.)
        # smoothing for pc
        # NOTE(review): d_saturation is not defined anywhere on this class,
        # so any smoothing_interval_p > 0 raises AttributeError here — confirm.
        self._pc0 = smoothing_interval_p
        if self._pc0 > 0.:
            self._spline_sat = Spline(0., 1., 0., self._pc0, self.saturation(self._pc0), self.d_saturation(self._pc0))
    def capillaryPressure( self, s ):
        """Capillary pressure [Pa] at saturation s (inverse of saturation())."""
        if s <= self._sr:
            return np.inf
        if s >= 1.:
            return 0.
        se = (s - self._sr) / (1.0 - self._sr)
        if (se < 1.e-8):
            # Asymptotic form for tiny se: (se**(-1/m) - 1) ~= se**(-1/m).
            return pow(se, -1.0/(self._m * self._n)) / self._alpha
        else:
            return (pow(pow(se, -1.0/self._m) - 1.0, 1/self._n)) / self._alpha
    def saturation( self, pc ):
        """Saturation [-] at capillary pressure pc [Pa]."""
        if pc <= 0.0:
            return 1.0
        elif pc < self._pc0:
            # Smoothed branch near pc = 0 (only reachable when pc0 > 0).
            return self._spline_sat.Value(pc)
        else:
            se = pow(1.0 + pow(self._alpha*pc, self._n), -self._m)
            return se * (1.0 - self._sr) + self._sr
    def k_relative( self, s ):
        """Relative permeability [-] at saturation s (Mualem-van Genuchten form)."""
        if s >= 1.:
            return 1.
        elif s <= self._sr:
            return 0.
        elif s <= self._s0:
            se = (s - self._sr) / (1.0-self._sr)
            return (se**self._l) * pow( 1.0 - pow( 1.0 - pow(se,1.0/self._m),self._m), 2)
        else:
            # Smoothed branch on (s0, 1).
            return self._spline.Value(s)
    def d_k_relative( self, s ):
        """Derivative of k_relative with respect to saturation."""
        if s >= 1.:
            return 0
        elif s <= self._sr:
            return 0.
        elif s <= self._s0 + 1.e-6:
            se = (s - self._sr)/(1-self._sr);
            x = pow(se, 1.0 / self._m);
            # Guard the 1/(1-x) factor below against division by zero.
            if (abs(1.0 - x) < 1.e-10):
                return 0.0;
            y = pow(1.0 - x, self._m);
            dkdse = (1.0 - y) * (self._l * (1.0 - y) + 2 * x * y / (1.0 - x)) * pow(se, self._l - 1.0);
            # Chain rule: d(se)/d(s) = 1 / (1 - sr).
            return dkdse / (1 - self._sr);
        else:
            return self._spline.Derivative(s)
    def label( self ):
        """Descriptive label including the smoothing width."""
        return "VG: a=%1.2e [1/Pa], n=%1.2g, sr=%1.2g, smooth=%g"%(self._alpha, self._n, self._sr, 1-self._s0)
    def short_label( self ):
        """Descriptive label without the smoothing width."""
        return "VG: a=%1.2e [1/Pa], n=%1.2g, sr=%1.2g"%(self._alpha, self._n, self._sr)
    def plot(self, ax=None, color='b', format='-', label=None, y_units='Pa'):
        """Plot capillary pressure vs. saturation for pc in [1, 1e7] Pa.

        y_units selects the pressure axis units ('Pa', 'hPa', 'm', 'cm').
        Creates a new figure when ax is None; returns the axes used.
        """
        pc = np.linspace(0,7, 1000)
        pc = 10**pc
        if label is None:
            label = self.short_label()
        if ax is None:
            fig,ax = plt.subplots(1,1,squeeze=True)
        s = np.array([self.saturation(apc) for apc in pc])
        if y_units == 'hPa':
            pc = pc / 100.
        elif y_units == 'm':
            # Pa -> meters of water head (divide by rho*g, g = 9.81).
            pc = pc / 1000 / 9.81
        elif y_units == 'cm':
            pc = pc / 1000 / 9.81 * 100
        elif y_units == 'Pa':
            pass
        else:
            raise ValueError("Invalid units for yaxis, must be one of [Pa, m, cm, hPa]")
        ax.semilogy(s, pc, color=color, label=label)
        ax.set_xlabel("saturation [-]")
        ax.set_ylabel("capillary pressure [{}]".format(y_units))
        return ax
    def plot_kr(self, ax=None, color='b', format='-', label=None):
        """Plot relative permeability vs. saturation for pc in [1, 1e7] Pa."""
        if ax is None:
            fig,ax = plt.subplots(1,1,squeeze=True)
        if label is None:
            label = self.short_label()
        pc = np.linspace(0,7, 1000)
        pc = 10**pc
        sat = np.array([self.saturation(apc) for apc in pc])
        kr = np.array([self.k_relative(s) for s in sat])
        ax.plot(sat, kr, color=color, label=label)
if __name__ == "__main__":
    # Command-line tool: plot one or more van Genuchten WRM curves.
    import sys
    import argparse
    import shlex
    import colors
    parser = argparse.ArgumentParser('plot WRM curves')
    def option_to_wrm(s):
        """argparse type: parse a shlex-quoted 'alpha n sr [label [smoothing_interval_sat]]' string into a (label, VanGenuchten) pair."""
        print("got: {}".format(s))
        try:
            s = shlex.split(s)
            print("s = {}".format(s))
            assert(3 <= len(s) <= 5)
            alpha, n, sr = map(float, s[0:3])
            if len(s) > 3:
                label = s[3]
            else:
                label = None
            if len(s) > 4:
                smooth_int_sat = float(s[4])
            else:
                smooth_int_sat = 0.
            print("WRM:")
            print(f"  alpha = {alpha}")
            print(f"  n = {n}")
            print(f"  sr = {sr}")
            print(f"  smoothing_interval_sat = {smooth_int_sat}")
            print(f"  label = {label}")
        except:
            # NOTE(review): bare except turns every failure (including
            # programming errors above) into an argparse type error.
            raise argparse.ArgumentTypeError("WRM must be van Genucten parameters (alpha, n, sr, label, smoothing_interval_sat)")
        else:
            return label, VanGenuchten(alpha=alpha, n=n, sr=sr, smoothing_interval_sat=smooth_int_sat)
    parser.add_argument('--wrm', type=option_to_wrm, action='append', help='WRM parameters, "alpha n sr [label [smoothing_interval_sat]]"')
    parser.add_argument('--y-units', type=str, choices=['Pa','m','hPa','cm'], default='Pa', help='units of the y-axis, in log space')
    parser.add_argument('--kr', action='store_true', help='Plot relative permeability curve')
    args = parser.parse_args()
    # NOTE(review): if no --wrm option is supplied, args.wrm is None and
    # len(args.wrm) raises TypeError — confirm intended usage.
    color_list = colors.enumerated_colors(len(args.wrm))
    fig = plt.figure()
    ax = fig.add_subplot(111)
    if args.kr:
        # One kr-vs-saturation curve per WRM.
        for (label,wrm), color in zip(args.wrm, color_list):
            wrm.plot_kr(ax, color, label=label)
    else:
        # One pc-vs-saturation curve per WRM.
        for (label,wrm), color in zip(args.wrm, color_list):
            wrm.plot(ax, color, y_units=args.y_units, label=label)
    ax.legend()
    plt.show()
    sys.exit(0)
|
# NOTE(review): the literal '<caret>' marker and the numbered
# 'pylint: disable' comments suggest this is an IDE/linter test fixture,
# not runnable code — confirm before altering its contents.
def func():
    value = "not-none"
    # pylint: disable=unused-argument1
    <caret>if value is None:
        print("None")
        # pylint: disable=unused-argument2
    print(value)
|
MyObj)
# object-action updates dict case
thing = {'foo': obj.obj_to_primitive()}
primitive = ser.serialize_entity(self.context, thing)
self.assertEqual(thing, primitive)
thing2 = ser.deserialize_entity(self.context, thing)
self.assertIsInstance(thing2['foo'], base.NovaObject)
class TestArgsSerializer(test.NoDBTestCase):
    """Checks that @base.serialize_args stringifies datetime arguments."""

    def setUp(self):
        super(TestArgsSerializer, self).setUp()
        self.now = timeutils.utcnow()
        self.str_now = timeutils.strtime(at=self.now)

    @base.serialize_args
    def _test_serialize_args(self, *args, **kwargs):
        # By the time this body runs, the decorator should have replaced
        # every datetime argument with its serialized string form.
        expected_args = ('untouched', self.str_now, self.str_now)
        for expected, actual in zip(expected_args, args):
            self.assertEqual(expected, actual)
        expected_kwargs = {'a': 'untouched', 'b': self.str_now,
                           'c': self.str_now}
        for key, actual in six.iteritems(kwargs):
            self.assertEqual(expected_kwargs[key], actual)

    def test_serialize_args(self):
        self._test_serialize_args('untouched', self.now, self.now,
                                  a='untouched', b=self.now, c=self.now)
# NOTE(danms): The hashes in this list should only be changed if
# they come with a corresponding version bump in the affected
# objects
# Each value is a '<version>-<fingerprint>' string for the named object.
object_data = {
    'Agent': '1.0-c0c092abaceb6f51efe5d82175f15eba',
    'AgentList': '1.0-5a7380d02c3aaf2a32fc8115ae7ca98c',
    'Aggregate': '1.1-1ab35c4516f71de0bef7087026ab10d1',
    'AggregateList': '1.2-fb6e19f3c3a3186b04eceb98b5dadbfa',
    'BandwidthUsage': '1.2-c6e4c779c7f40f2407e3d70022e3cd1c',
    'BandwidthUsageList': '1.2-5fe7475ada6fe62413cbfcc06ec70746',
    'BlockDeviceMapping': '1.14-d44d8d694619e79c172a99b3c1d6261d',
    'BlockDeviceMappingList': '1.15-6fa262c059dad1d519b9fe05b9e4f404',
    'CellMapping': '1.0-7f1a7e85a22bbb7559fc730ab658b9bd',
    'ComputeNode': '1.12-71784d2e6f2814ab467d4e0f69286843',
    'ComputeNodeList': '1.12-3b6f4f5ade621c40e70cb116db237844',
    'DNSDomain': '1.0-7b0b2dab778454b6a7b6c66afe163a1a',
    'DNSDomainList': '1.0-4ee0d9efdfd681fed822da88376e04d2',
    'EC2Ids': '1.0-474ee1094c7ec16f8ce657595d8c49d9',
    'EC2InstanceMapping': '1.0-a4556eb5c5e94c045fe84f49cf71644f',
    'EC2SnapshotMapping': '1.0-47e7ddabe1af966dce0cfd0ed6cd7cd1',
    'EC2VolumeMapping': '1.0-5b713751d6f97bad620f3378a521020d',
    'FixedIP': '1.12-b5818a33996228fc146f096d1403742c',
    'FixedIPList': '1.12-87a39361c8f08f059004d6b15103cdfd',
    'Flavor': '1.1-b6bb7a730a79d720344accefafacf7ee',
    'FlavorList': '1.1-52b5928600e7ca973aa4fc1e46f3934c',
    'FloatingIP': '1.8-52a67d52d85eb8b3f324a5b7935a335b',
    'FloatingIPList': '1.9-7f2ba670714e1b7bab462ab3290f7159',
    'HostMapping': '1.0-1a3390a696792a552ab7bd31a77ba9ac',
    'HVSpec': '1.1-6b4f7c0f688cbd03e24142a44eb9010d',
    'ImageMeta': '1.5-642d1b2eb3e880a367f37d72dd76162d',
    'ImageMetaProps': '1.5-93a74996a8d3c2aa821fddab301a9b1a',
    'Instance': '1.22-260d385315d4868b6397c61a13109841',
    'InstanceAction': '1.1-f9f293e526b66fca0d05c3b3a2d13914',
    'InstanceActionEvent': '1.1-e56a64fa4710e43ef7af2ad9d6028b33',
    'InstanceActionEventList': '1.1-13d92fb953030cdbfee56481756e02be',
    'InstanceActionList': '1.0-4a53826625cc280e15fae64a575e0879',
    'InstanceExternalEvent': '1.1-6e446ceaae5f475ead255946dd443417',
    'InstanceFault': '1.2-7ef01f16f1084ad1304a513d6d410a38',
    'InstanceFaultList': '1.1-f8ec07cbe3b60f5f07a8b7a06311ac0d',
    'InstanceGroup': '1.9-a413a4ec0ff391e3ef0faa4e3e2a96d0',
    'InstanceGroupList': '1.6-be18078220513316abd0ae1b2d916873',
    'InstanceInfoCache': '1.5-cd8b96fefe0fc8d4d337243ba0bf0e1e',
    'InstanceList': '1.20-5f6eba7d94c4e4ad6fc9a3347f85f4a7',
    'InstanceMapping': '1.0-47ef26034dfcbea78427565d9177fe50',
    'InstanceMappingList': '1.0-9e982e3de1613b9ada85e35f69b23d47',
    'InstanceNUMACell': '1.2-535ef30e0de2d6a0d26a71bd58ecafc4',
    'InstanceNUMATopology': '1.2-d944a7d6c21e1c773ffdf09c6d025954',
    'InstancePCIRequest': '1.1-b1d75ebc716cb12906d9d513890092bf',
    'InstancePCIRequests': '1.1-fc8d179960869c9af038205a80af2541',
    'KeyPair': '1.3-bfaa2a8b148cdf11e0c72435d9dd097a',
    'KeyPairList': '1.2-58b94f96e776bedaf1e192ddb2a24c4e',
    'Migration': '1.2-8784125bedcea0a9227318511904e853',
    'MigrationList': '1.2-02c0ec0c50b75ca86a2a74c5e8c911cc',
    'MonitorMetric': '1.1-53b1db7c4ae2c531db79761e7acc52ba',
    'MonitorMetricList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'NUMACell': '1.2-74fc993ac5c83005e76e34e8487f1c05',
    'NUMAPagesTopology': '1.0-c71d86317283266dc8364c149155e48e',
    'NUMATopology': '1.2-c63fad38be73b6afd04715c9c1b29220',
    'NUMATopologyLimits': '1.0-9463e0edd40f64765ae518a539b9dfd2',
    'Network': '1.2-a977ab383aa462a479b2fae8211a5dde',
    'NetworkList': '1.2-69eca910d8fa035dfecd8ba10877ee59',
    'NetworkRequest': '1.1-7a3e4ca2ce1e7b62d8400488f2f2b756',
    'NetworkRequestList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'PciDevice': '1.3-d92e0b17bbed61815b919af6b8d8998e',
    'PciDeviceList': '1.2-3757458c45591cbc92c72ee99e757c98',
    'PciDevicePool': '1.1-3f5ddc3ff7bfa14da7f6c7e9904cc000',
    'PciDevicePoolList': '1.1-15ecf022a68ddbb8c2a6739cfc9f8f5e',
    'Quotas': '1.2-1fe4cd50593aaf5d36a6dc5ab3f98fb3',
    'QuotasNoOp': '1.2-e041ddeb7dc8188ca71706f78aad41c1',
    'S3ImageMapping': '1.0-7dd7366a890d82660ed121de9092276e',
    'SecurityGroup': '1.1-0e1b9ba42fe85c13c1437f8b74bdb976',
    'SecurityGroupList': '1.0-dc8bbea01ba09a2edb6e5233eae85cbc',
    'SecurityGroupRule': '1.1-ae1da17b79970012e8536f88cb3c6b29',
    'SecurityGroupRuleList': '1.1-674b323c9ccea02e93b1b40e7fd2091a',
    'Service': '1.16-f1c6e82b5479f63e35970fe7625c3878',
    'ServiceList': '1.14-b767102cba7cbed290e396114c3f86b3',
    'TaskLog': '1.0-78b0534366f29aa3eebb01860fbe18fe',
    'TaskLogList': '1.0-cc8cce1af8a283b9d28b55fcd682e777',
    'Tag': '1.1-8b8d7d5b48887651a0e01241672e2963',
    'TagList': '1.1-55231bdb671ecf7641d6a2e9109b5d8e',
    'VirtCPUFeature': '1.0-3310718d8c72309259a6e39bdefe83ee',
    'VirtCPUModel': '1.0-6a5cc9f322729fc70ddc6733bacd57d3',
    'VirtCPUTopology': '1.0-fc694de72e20298f7c6bab1083fd4563',
    'VirtualInterface': '1.0-19921e38cba320f355d56ecbf8f29587',
    'VirtualInterfaceList': '1.0-9750e2074437b3077e46359102779fc6',
}
class TestObjectVersions(test.NoDBTestCase):
@staticmethod
def _is_method(thing):
# NOTE(dims): In Python3, The concept of 'unbound methods' has
# been removed from the language. When referencing a method
# as a class attribute, you now get a plain function object.
# so let's check for both
return inspect.isfunction(thing) or inspect.ismethod(thing)
    def _find_remotable_method(self, cls, thing, parent_was_remotable=False):
        """Follow a chain of remotable things down to the original function."""
        if isinstance(thing, classmethod):
            # Unwrap the classmethod to reach the underlying callable.
            return self._find_remotable_method(cls, thing.__get__(None, cls))
        elif self._is_method(thing) and hasattr(thing, 'remotable'):
            # Step through one @remotable wrapper layer.
            return self._find_remotable_method(cls, thing.original_fn,
                                               parent_was_remotable=True)
        elif parent_was_remotable:
            # We must be the first non-remotable thing underneath a stack of
            # remotable things (i.e. the actual implementation method)
            return thing
        else:
            # This means the top-level thing never hit a remotable layer
            return None
    def _un_unicodify_enum_valid_values(self, _fields):
        # Re-encode every enum field's valid_values as UTF-8 byte strings,
        # presumably to keep fingerprints stable across the Python 2/3
        # unicode split — confirm against the fingerprinting code.
        for name, field in _fields:
            if not isinstance(field, (fields.BaseEnumField,
                                      fields.EnumField)):
                continue
            # Preserve the container type (list/tuple/...) of valid_values.
            orig_type = type(field._type._valid_values)
            field._type._valid_values = orig_type(
                [x.encode('utf-8') for x in
                 field._type._valid_values])
def _get_fingerprint(self, obj_name):
obj_classes = base.NovaObjectRegistry.obj_classes()
obj_class = obj_classes[obj_name][0]
fields = list(obj_class.fields.items())
# NOTE(danms): We store valid_values |
"""Seqan Doc Links for Trac.
Version 0.1.
Copyright (C) 2010 Manuel Holtgrewe
Install by copying this file into the plugins directory of your trac
work directory. In your trac.ini, you can use something like this
(the following also shows the defaults).
[seqan_doc_links]
prefix = seqan
base_url = http://www.seqan.de/dddoc/html/
dox_prefix = dox
dox_base_url = http://docs.seqan.de/dev3/
Use something like this to test the plugin:
* {{{[seqan:Page.Sequences]}}} [seqan:Page.Sequences]
* {{{seqan:Class.Finder}}} seqan:Class.Finder
* {{{seqan:"Concept.Simple Type"}}} seqan:"Concept.Simple Type"
 * {{{seqan:"Spec.Chunk Pool Allocator"}}} seqan:"Spec.Chunk Pool Allocator"
*
* {{{dox:ContainerConcept#length}}}
"""
import urllib
import sys
from trac.core import *
import trac.wiki
import genshi.builder as gb
import genshi
from trac.web.chrome import ITemplateProvider, add_stylesheet
def getFilename(cat, item):
    """Return the HTML file name that dddoc generates for an item.

    Args:
      cat   String, category of the link (e.g. 'Class').
      item  String, name of the item (e.g. 'Finder').

    Returns:
      File name such as 'CLASS_Finder.html'.
    """
    return '%s%s.html' % (cat.upper(), escapeFiles(item))
def escapeFiles(text):
    """Escape an item name the way dddoc does when building file names.

    Underscores are doubled, uppercase letters get a '_' marker prefix,
    punctuation is hex-encoded as '_XX', and spaces become '+'.  Names
    that do not already start with '_' are prefixed with '.'.

    Args:
      text  String with the text to escape.

    Returns:
      Escaped text.
    """
    hex_codes = {
        "\t": "_09", "\n": "_0a", "!": "_21", "\"": "_22", "#": "_23",
        "$": "_24", "%": "_25", "&": "_26", "'": "_27", "(": "_28",
        ")": "_29", "*": "_2a", "+": "_2b", "/": "_2f", ":": "_3a",
        ",": "_2c", "<": "_3c", ">": "_3e", "?": "_3f", "\\": "_5c",
        "|": "_7c", " ": "+",
    }
    pieces = []
    for ch in text.replace("_", "__"):
        if 'A' <= ch <= 'Z':
            pieces.append("_" + ch)
        else:
            pieces.append(hex_codes.get(ch, ch))
    escaped = "".join(pieces)
    if (len(escaped) == 0) or (escaped[0] == '_'):
        return escaped
    return '.' + escaped
class SeqanDocsSyntaxProvider(trac.core.Component):
    """Expands seqan:<Category>.<EntryName> links."""
    trac.core.implements(trac.wiki.IWikiSyntaxProvider)
    implements(ITemplateProvider)
    # trac.ini section and defaults for the link prefix and target URL.
    SECTION_NAME = 'seqan_doc_links'
    DEFAULT_PREFIX = 'seqan'
    DEFAULT_BASE_URL = 'http://www.seqan.de/dddoc/html/'
    def __init__(self):
        # Set defaults.
        self.prefix = self.DEFAULT_PREFIX
        self.base_url = self.DEFAULT_BASE_URL
        # Parse configuration from trac.ini config file.
        for option in self.config.options(self.SECTION_NAME):
            if option[0] == 'prefix':
                self.prefix = option[1]
            if option[0] == 'base_url':
                self.base_url = option[1]
    def get_wiki_syntax(self):
        """Method from IWikiSyntaxProvider.
        Returns empty list, we do not implement any."""
        return []
    def get_link_resolvers(self):
        """Method from IWikiSyntaxProvider.
        Returns iterable (list) of (prefix, function) pairs.
        """
        return [(self.prefix, self.format_doc_link)]
    def format_doc_link(self, formatter, ns, target, label):
        """Function to perform formatting for seqan:XYZ links.
        This roughly follows [1].
        [1] http://trac.edgewall.org/wiki/TracDev/IWikiSyntaxProviderExample
        """
        add_stylesheet(formatter.req, 'doc_links/css/doc_links.css')
        # The following is a heuristic for "no alternative label".
        if ns in label and target in label:
            if '.' in target:
                category, item = tuple(target.split('.', 1))
                label = item
                # Strip everything before and including the first hash.
                if '#' in label:
                    label = label.split('#', 1)[1]
            else:
                label = target
        # Ignore if the target does not contain a dot.
        if not '.' in target:
            return target
        # Now, use dddoc's logic to generate the appropriate file name for
        # the categorized item.
        file_name = getFilename(*target.split('.', 1))
        span = [gb.tag.span(genshi.HTML(' '), class_='icon'), label]
        title = ' "%s" in SeqAn documentation.' % target
        return gb.tag.a(span, class_='doc-link',
                        href=self.base_url + file_name, title=title)
    ### ITemplateProvider methods
    def get_templates_dirs(self):
        return []
    def get_htdocs_dirs(self):
        from pkg_resources import resource_filename
        return [('doc_links', resource_filename(__name__, 'htdocs'))]
class SeqanDosSyntaxProvider(trac.core.Component):
    """Expands dox:<entry-id> links.

    NOTE(review): the class name reads 'Dos' where 'Dox' was presumably
    intended; renaming would change the registered trac component, so it
    is only flagged here.
    """
    trac.core.implements(trac.wiki.IWikiSyntaxProvider)
    implements(ITemplateProvider)
    SECTION_NAME = 'seqan_doc_links'
    DEFAULT_PREFIX = 'dox'
    # NOTE(review): the module docstring documents the dox default as
    # http://docs.seqan.de/dev3/, but this default still points at the
    # dddoc pages; dox_base_url in trac.ini overrides it — confirm.
    DEFAULT_BASE_URL = 'http://www.seqan.de/dddoc/html/'
    def __init__(self):
        # Set defaults.
        self.prefix = self.DEFAULT_PREFIX
        self.base_url = self.DEFAULT_BASE_URL
        # Parse configuration from trac.ini config file.
        for option in self.config.options(self.SECTION_NAME):
            if option[0] == 'dox_prefix':
                self.prefix = option[1]
            if option[0] == 'dox_base_url':
                self.base_url = option[1]
    def get_wiki_syntax(self):
        """Method from IWikiSyntaxProvider.
        Returns empty list, we do not implement any."""
        return []
    def get_link_resolvers(self):
        """Method from IWikiSyntaxProvider.
        Returns iterable (list) of (prefix, function) pairs.
        """
        return [(self.prefix, self.format_doc_link)]
    def format_doc_link(self, formatter, ns, target, label):
        """Function to perform formatting for dox:XYZ links.
        This roughly follows [1].
        [1] http://trac.edgewall.org/wiki/TracDev/IWikiSyntaxProviderExample
        """
        # Stylesheet already done for doc_links.
        add_stylesheet(formatter.req, 'doc_links/css/doc_links.css')
        # The following is a heuristic for "no alternative label".
        if not label:
            label = target
        if label.startswith(self.prefix + ':'):
            label = label[len(self.prefix) + 1:]
        # Build the page query for the target entry.
        query = '?p=%s' % target # TODO(holtgrew): url encode
        span = [gb.tag.span(genshi.HTML(' '), class_='icon'), label]
        title = ' "%s" in SeqAn documentation.' % target
        return gb.tag.a(span, class_='doc-link',
                        href=self.base_url + query, title=title)
    ### ITemplateProvider methods
    def get_templates_dirs(self):
        return []
    def get_htdocs_dirs(self):
        from pkg_resources import resource_filename
        return [('doc_links', resource_filename(__name__, 'htdocs'))]
|
import re
from rest_framework import serializers
from seahub.auth import authenticate
from seahub.api2.models import Token, TokenV2, DESKTOP_PLATFORMS
from seahub.api2.utils import get_client_ip
from seahub.utils import is_valid_username
def all_none(values):
    """Return True if every element of *values* is None (True when empty)."""
    # Idiomatic replacement for the manual loop; identical semantics.
    return all(value is None for value in values)
def all_not_none(values):
    """Return True if no element of *values* is None (True when empty)."""
    # Idiomatic replacement for the manual loop; identical semantics.
    return all(value is not None for value in values)
# Android device ids are lowercase hex, nominally 16 chars; the length is
# relaxed to 1-16 because some devices report fewer (see get_token_v2).
_ANDROID_DEVICE_ID_PATTERN = re.compile('^[a-f0-9]{1,16}$')
class AuthTokenSerializer(serializers.Serializer):
    """Validates a username/password login and issues an auth token.

    With only username/password a v1 Token is issued; when all five
    device-related fields are supplied a per-device TokenV2 is issued;
    a partial set of device fields is rejected.
    """
    username = serializers.CharField()
    password = serializers.CharField()
    # These fields are used by TokenV2
    platform = serializers.CharField(required=False)
    device_id = serializers.CharField(required=False)
    device_name = serializers.CharField(required=False)
    # These fields may be needed in the future
    client_version = serializers.CharField(required=False)
    platform_version = serializers.CharField(required=False)
    def validate(self, attrs):
        """Authenticate and return the token key.

        NOTE(review): unlike the usual DRF convention, this returns the
        token key string rather than the validated attrs dict — confirm
        callers expect that.
        """
        username = attrs.get('username')
        password = attrs.get('password')
        platform = attrs.get('platform', None)
        device_id = attrs.get('device_id', None)
        device_name = attrs.get('device_name', None)
        client_version = attrs.get('client_version', None)
        platform_version = attrs.get('platform_version', None)
        v2_fields = (platform, device_id, device_name, client_version, platform_version)
        # Decide the version of token we need: the v2 fields must be given
        # all together or not at all.
        if all_none(v2_fields):
            v2 = False
        elif all_not_none(v2_fields):
            v2 = True
        else:
            raise serializers.ValidationError('invalid params')
        # first check username and password
        if username:
            if not is_valid_username(username):
                raise serializers.ValidationError('username is not valid.')
        if username and password:
            user = authenticate(username=username, password=password)
            if user:
                if not user.is_active:
                    raise serializers.ValidationError('User account is disabled.')
            else:
                raise serializers.ValidationError('Unable to login with provided credentials.')
        else:
            raise serializers.ValidationError('Must include "username" and "password"')

        # Now user is authenticated
        if v2:
            token = self.get_token_v2(username, platform, device_id, device_name,
                                      client_version, platform_version)
        else:
            token = self.get_token_v1(username)
        return token.key
    def get_token_v1(self, username):
        # One shared token per user account.
        token, created = Token.objects.get_or_create(user=username)
        return token
    def get_token_v2(self, username, platform, device_id, device_name,
                     client_version, platform_version):
        """Validate the device id for the platform, then issue a per-device token."""
        if platform in DESKTOP_PLATFORMS:
            # desktop device id is the peer id, so it must be 40 chars
            if len(device_id) != 40:
                raise serializers.ValidationError('invalid device id')
        elif platform == 'android':
            # See http://developer.android.com/reference/android/provider/Settings.Secure.html#ANDROID_ID
            # android device id is the 64bit secure id, so it must be 16 chars in hex representation
            # but some user reports their device ids are 14 or 15 chars long. So we relax the validation.
            if not _ANDROID_DEVICE_ID_PATTERN.match(device_id.lower()):
                raise serializers.ValidationError('invalid device id')
        elif platform == 'ios':
            if len(device_id) != 36:
                raise serializers.ValidationError('invalid device id')
        else:
            raise serializers.ValidationError('invalid platform')
        request = self.context['request']
        last_login_ip = get_client_ip(request)
        return TokenV2.objects.get_or_create_token(username, platform, device_id, device_name,
                                                   client_version, platform_version, last_login_ip)
class AccountSerializer(serializers.Serializer):
    """Fields for creating or updating a user account."""
    email = serializers.EmailField()
    password = serializers.CharField()
    # New accounts default to active, non-staff.
    is_staff = serializers.BooleanField(default=False)
    is_active = serializers.BooleanField(default=True)
|
# Copyright (c) 2013, Web Notes Tech | nologies | Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: refresh the GST doctypes and set the default B2C limit."""
    # Make sure both doctype schemas are current before touching settings.
    for module, doctype in (("regional", "gst_settings"),
                            ("accounts", "gst_account")):
        frappe.reload_doc(module, "doctype", doctype)
    settings = frappe.get_doc("GST Settings")
    settings.b2c_limit = 250000
    settings.save()
|
import os

# API credentials are read at import time; a missing variable raises
# KeyError immediately rather than failing later at request time.
yandex_api_key = os.environ['YANDEX_API_KEY']
weather_underground_api_key = os.environ['WEATHER_UNDERGROUND_API_KEY']
|
in ('-U'):
image_file = args
print ("===============================================================================")
print ("baud_rate = ", baud_rate)
print ("com_port = ", com_port)
print ("image file = ", image_file)
print ("===============================================================================")
try:
ocd = OCD_8051 (com_port, baud_rate, verbose=0)
except:
print ("Failed to open COM port")
sys.exit(1)
class dummy_console:
    #############################################################################
    # command procedures
    #############################################################################
    # Sub-command index codes for the debug/time counter commands;
    # presumably consumed by the OCD protocol — confirm against OCD_8051.
    _DEBUG_COUNTER_INDEX_RESET = 1
    _DEBUG_COUNTER_INDEX_SET = 2
    _TIME_COUNTER_INDEX_RESET = 3
    _TIME_COUNTER_INDEX_SET = 4
def _string_to_data (self, data_string):
if (data_string.startswith('0x')):
data = int(data_string[2:], 16)
else:
data = int(data_string)
return data
    def _do_reset_cpu (self):
        # Drop any stale bytes on both sides of the serial link before and
        # after the reset so the next command starts from a clean stream.
        self._ocd._serial.reset_output_buffer()
        self._ocd._serial.reset_input_buffer()
        self._ocd.cpu_reset()
        self._ocd._serial.reset_output_buffer()
        self._ocd._serial.reset_input_buffer()
    def _do_pause_cpu (self):
        # Pause the CPU; the second argument is presumably an ack/wait
        # flag — confirm against OCD_8051.cpu_pause.
        self._ocd.cpu_pause (1, 1)
    def _do_resume_cpu (self):
        # Resume the CPU: same call as pause with the pause flag cleared.
        self._ocd.cpu_pause (0, 1)
    def _write_code (self, addr, data):
        """Write *data* (a byte sequence) to code memory starting at *addr*.

        Uses progressively larger transfers: single bytes up to the next
        4-byte boundary, then 128-byte frames, then 32-bit words, then
        single bytes for the unaligned remainder.
        """
        offset = 0
        length = len (data)
        addr_end = addr + length
        # Leading single bytes until addr+offset is 4-byte aligned.
        if (addr % 4):
            for i in range (min([(4 - (addr % 4)), length])):
                self._ocd.code_mem_write_byte (addr + offset, data[i])
                offset = offset + 1
        total_words = (addr_end - addr - offset) // 4
        total_128byte_frame = total_words //32
        # Bulk of the data in 128-byte frames (32 words each).
        for i in range (total_128byte_frame):
            self._ocd.code_mem_write_128byte (addr + offset, data[offset : offset + 128])
            offset = offset + 128
        # Remaining whole 32-bit words, packed most-significant byte first.
        for i in range (total_words - total_128byte_frame * 32):
            data_int = (data[offset] << 24) + \
                       (data[offset + 1] << 16) + \
                       (data[offset + 2] << 8) + \
                       (data[offset + 3])
            self._ocd.code_mem_write_32bit(addr + offset, data_int)
            offset = offset + 4
        # Trailing bytes that do not fill a whole word.
        for i in range (length - offset):
            self._ocd.code_mem_write_byte (addr + offset, data [offset])
            offset = offset + 1
def _do_load_hex_file (self):
intel_hex_file = Intel_Hex(self._args[1])
if (len (intel_hex_file.data_record_list) == 0):
return
if (len(self._args) > 2):
try:
f = open(self._args[2], 'w')
except IOError:
print ("Fail to open: ", self._args[2])
return
#self._do_pause_cpu()
#print ("CPU paused");
#print ("CPU reset ...")
#self._do_reset_cpu()
#sleep(0.5)
print ("Loading...", self._args[1])
last_addr = intel_hex_file.data_record_list[-2].address + len(intel_hex_file.data_record_list[-1].data_list)
len_completed = 0
address = 0
merge_data_list = []
start_time = time.clock()
print_cnt = 0
print ("Writing | ", end="")
for record in intel_hex_file.data_record_list:
#print ("xxxxaddr=", record.address, "data=", record.data_list)
if ((print_cnt % 16) == 0):
print("#", end="")
sys.stdout.flush()
print_cnt = print_cnt + 1
if (len(merge_data_list) == 0):
address = record.address
merge_data_list = record.data_list
#print ("YY addr = ", address, " ", len (merge_data_list))
elif ((address + len (merge_data_list)) == record.address):
merge_data_list = merge_data_list + record.data_list
#print ("WW addr = ", address, " ", len (merge_data_list))
#print (merge_data_list)
else:
#print ("XXXXXXXXXXXXXXX ", address, " ", len(merge_data_list))
self._write_code (address, merge_data_list)
#print ("YYYYYYYYYYYYYYYY")
len_completed = len_completed + len(merge_data_list)
load_progress = math.ceil(len_completed * 100 / last_addr);
if (load_progress > 100):
load_progress = 100
#print ("\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b", end="")
#print ("%d%% completed" % load_progress, end="")
print("#", end="")
sys.stdout.flush()
if (len(self._args) > 2):
f.write('addr %d\n' % (address))
for item in merge_data_list:
f.write('%d\n' % (item))
address = record.address
merge_data_list = record.data_list
if (len(self._args) > 2):
f.close()
end_time = time.clock()
delta_time = end_time - start_time
print (" | 100% {0:0.2f}s".format(delta_time))
self._do_resume_cpu()
print ("\nCPU reset ...")
self._do_reset_cpu()
print ("Done: ", last_addr, " Byte(s)")
print ("CPU is running")
def _do_uart_select (self):
self._ocd.uart_select (1 - self.uart_raw_mode_enable)
def _do_uart_switch (self):
self.uart_raw_mode_enable = 1 - self.uart_raw_mode_enable
self._ocd._serial.reset_output_buffer()
self._ocd._serial.reset_input_buffer()
self._do_uart_select()
self._ocd._serial.write ([ord('\r')])
sleep(0.5)
if (self._ocd._serial.in_waiting):
r = self._ocd._serial.read (self._ocd._serial.in_waiting)
prt_out = ""
for i in r:
if (i < 128):
prt_out = prt_out + chr(i)
#print (prt_out, end="")
#sys.stdout.flush()
def _do_load_hex_and_switch (self):
self._do_load_hex_file()
self._do_uart_switch()
#############################################################################
# static variables
#############################################################################
#############################################################################
# Methods
#############################################################################
def __init__ (self, ocd):
self._ocd = ocd
self.uart_raw_mode_enable = 0
self._do_uart_select()
if (ocd._serial.in_waiting):
r = ocd._serial.read (ocd._serial.in_waiting) # clear the uart receive buffer
| self._do_pause_cpu()
print ("CPU paused");
sleep(0.5)
print ("CPU reset ...")
self._do_reset_cpu()
| sleep(0.5)
self.uart_raw_mode_enable = 0
self._do_uart_select()
# Script entry: build the console (this pauses/resets the CPU), then load
# the image and hand the UART over to raw mode.
console = dummy_console(ocd)
# Reuse the console's argv-style command interface: args[0] is the command
# name, args[1] is the hex file path.
console._args = ("load_hex_and_switch " + image_file).split()
console._do_load_hex_and_switch ()
#for k in sys.argv[1:]:
#    if k.startswith("-U"):
#        U = k[2:].split(':')
#        file_name = U[2] + ":" + U[3]
#        print (file_name)
#    elif k.startswith("-b"):
#        baud_rate = int(k[2:])
#        print ("baud_rate = ", baud_rate)
#f
import fileinput
def str_to_int(s):
    """Split whitespace-separated integers in *s* into a list of ints."""
    return list(map(int, s.split()))
# args = [ 'line 1', 'line 2', ... ]
def proc_input(args):
    """Parse "n l" from the first line and the n positions from the second;
    return (l, positions) with positions as a tuple."""
    n, l = str_to_int(args[0])
    positions = tuple(str_to_int(args[1]))
    return (l, positions)
def solve(args, verbose=False):
    """Return the minimal lantern radius lighting the whole street [0, l].

    The answer is the largest of: twice the gap at either end of the
    street, or the largest gap between adjacent lanterns -- halved.
    BUGFIX: replaced the Python-2-only ``xrange``/``print`` statement with
    ``range``/``print()`` so the function runs on both Python 2 and 3.
    """
    (l, a) = proc_input(args)
    xs = sorted(a)
    # Edge gaps count double: a lantern lights in both directions.
    max_dist = max(xs[0] * 2, (l - xs[-1]) * 2)
    for i in range(len(xs) - 1):
        max_dist = max(max_dist, xs[i + 1] - xs[i])
    if verbose:
        print(max_dist / 2.0)
    return max_dist / 2.0
def test():
    """Self-checks for str_to_int, proc_input and solve."""
    assert str_to_int('1 2 3') == [1, 2, 3]
    assert proc_input(['2 5', '2 5']) == (5, (2, 5))
    for lines, expected in (
        (['2 5', '2 5'], 2.0),
        (['4 5', '0 1 2 3'], 2.0),
        (['7 15', '15 5 3 7 9 14 0'], 2.5),
    ):
        assert solve(lines) == expected
if __name__ == '__main__':
    from sys import argv
    # NOTE(review): argv.pop() removes the LAST CLI argument before
    # fileinput consumes argv -- passing 'test' as the final argument runs
    # the self-tests; otherwise the popped argument is discarded.
    if argv.pop() == 'test':
        test()
    else:
        solve(list(fileinput.input()), verbose=True)
|
from tests import tests
def test_toggle():
    """After the toggle pass, every seat in the 3x3 grid should be on (1)."""
    expected = [[1, 1, 1] for _ in range(3)]
    assert tests.toggled_seats == expected
, 3)
assert _has_rational_power(sqrt(x)*x**(S(1)/3), x) == (True, 6)
def test_solveset_sqrt_1():
    # Simple radical equations with a single sqrt/root term.
    assert solveset_real(sqrt(5*x + 6) - 2 - x, x) == \
        FiniteSet(-S(1), S(2))
    assert solveset_real(sqrt(x - 1) - x + 7, x) == FiniteSet(10)
    assert solveset_real(sqrt(x - 2) - 5, x) == FiniteSet(27)
    assert solveset_real(sqrt(x) - 2 - 5, x) == FiniteSet(49)
    assert solveset_real(sqrt(x**3), x) == FiniteSet(0)
    assert solveset_real(sqrt(x - 1), x) == FiniteSet(1)
def test_solveset_sqrt_2():
    # Radical equations with several root terms; sources linked below.
    # http://tutorial.math.lamar.edu/Classes/Alg/SolveRadicalEqns.aspx#Solve_Rad_Ex2_a
    assert solveset_real(sqrt(2*x - 1) - sqrt(x - 4) - 2, x) == \
        FiniteSet(S(5), S(13))
    assert solveset_real(sqrt(x + 7) + 2 - sqrt(3 - x), x) == \
        FiniteSet(-6)
    # http://www.purplemath.com/modules/solverad.htm
    assert solveset_real(sqrt(17*x - sqrt(x**2 - 5)) - 7, x) == \
        FiniteSet(3)
    eq = x + 1 - (x**4 + 4*x**3 - x)**Rational(1, 4)
    assert solveset_real(eq, x) == FiniteSet(-S(1)/2, -S(1)/3)
    eq = sqrt(2*x + 9) - sqrt(x + 1) - sqrt(x + 4)
    assert solveset_real(eq, x) == FiniteSet(0)
    eq = sqrt(x + 4) + sqrt(2*x - 1) - 3*sqrt(x - 1)
    assert solveset_real(eq, x) == FiniteSet(5)
    eq = sqrt(x)*sqrt(x - 7) - 12
    assert solveset_real(eq, x) == FiniteSet(16)
    eq = sqrt(x - 3) + sqrt(x) - 3
    assert solveset_real(eq, x) == FiniteSet(4)
    eq = sqrt(2*x**2 - 7) - (3 - x)
    assert solveset_real(eq, x) == FiniteSet(-S(8), S(2))
    # others
    eq = sqrt(9*x**2 + 4) - (3*x + 2)
    assert solveset_real(eq, x) == FiniteSet(0)
    assert solveset_real(sqrt(x - 3) - sqrt(x) - 3, x) == FiniteSet()
    eq = (2*x - 5)**Rational(1, 3) - 3
    assert solveset_real(eq, x) == FiniteSet(16)
    assert solveset_real(sqrt(x) + sqrt(sqrt(x)) - 4, x) == \
        FiniteSet((-S.Half + sqrt(17)/2)**4)
    eq = sqrt(x) - sqrt(x - 1) + sqrt(sqrt(x))
    assert solveset_real(eq, x) == FiniteSet()
    eq = (sqrt(x) + sqrt(x + 1) + sqrt(1 - x) - 6*sqrt(5)/5)
    ans = solveset_real(eq, x)
    ra = S('''-1484/375 - 4*(-1/2 + sqrt(3)*I/2)*(-12459439/52734375 +
    114*sqrt(12657)/78125)**(1/3) - 172564/(140625*(-1/2 +
    sqrt(3)*I/2)*(-12459439/52734375 + 114*sqrt(12657)/78125)**(1/3))''')
    rb = S(4)/5
    # Check numerically: both roots satisfy the equation and the solved
    # set equals {ra, rb} after chopping numerical noise.
    assert all(abs(eq.subs(x, i).n()) < 1e-10 for i in (ra, rb)) and \
        len(ans) == 2 and \
        set([i.n(chop=True) for i in ans]) == \
        set([i.n(chop=True) for i in (ra, rb)])
    assert solveset_real(sqrt(x) + x**Rational(1, 3) +
                         x**Rational(1, 4), x) == FiniteSet(0)
    assert solveset_real(x/sqrt(x**2 + 1), x) == FiniteSet(0)
    eq = (x - y**3)/((y**2)*sqrt(1 - y**2))
    assert solveset_real(eq, x) == FiniteSet(y**3)
    # issue 4497
    assert solveset_real(1/(5 + x)**(S(1)/5) - 9, x) == \
        FiniteSet(-295244/S(59049))
@XFAIL
def test_solve_sqrt_fail():
    # Known failure: solveset's checksol step rejects the valid root.
    # this only works if we check real_root(eq.subs(x, S(1)/3))
    # but checksol doesn't work like that
    eq = (x**3 - 3*x**2)**Rational(1, 3) + 1 - x
    assert solveset_real(eq, x) == FiniteSet(S(1)/3)
@slow
def test_solve_sqrt_3():
    # Radical equation with exact trig-form complex roots; slow to verify.
    R = Symbol('R')
    eq = sqrt(2)*R*sqrt(1/(R + 1)) + (R + 1)*(sqrt(2)*sqrt(1/(R + 1)) - 1)
    sol = solveset_complex(eq, R)
    assert sol == FiniteSet(*[S(5)/3 + 4*sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3,
        -sqrt(10)*cos(atan(3*sqrt(111)/251)/3)/3 + 40*re(1/((-S(1)/2 -
        sqrt(3)*I/2)*(S(251)/27 + sqrt(111)*I/9)**(S(1)/3)))/9 +
        sqrt(30)*sin(atan(3*sqrt(111)/251)/3)/3 + S(5)/3 +
        I*(-sqrt(30)*cos(atan(3*sqrt(111)/251)/3)/3 -
        sqrt(10)*sin(atan(3*sqrt(111)/251)/3)/3 + 40*im(1/((-S(1)/2 -
        sqrt(3)*I/2)*(S(251)/27 + sqrt(111)*I/9)**(S(1)/3)))/9)])
    # the number of real roots will depend on the value of m: for m=1 there are 4
    # and for m=-1 there are none.
    eq = -sqrt((m - q)**2 + (-m/(2*q) + S(1)/2)**2) + sqrt((-m**2/2 - sqrt(
        4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2 + (m**2/2 - m - sqrt(
        4*m**4 - 4*m**2 + 8*m + 1)/4 - S(1)/4)**2)
    unsolved_object = ConditionSet(q, Eq((-2*sqrt(4*q**2*(m - q)**2 +
        (-m + q)**2) + sqrt((-2*m**2 - sqrt(4*m**4 - 4*m**2 + 8*m + 1) -
        1)**2 + (2*m**2 - 4*m - sqrt(4*m**4 - 4*m**2 + 8*m + 1) - 1)**2
        )*Abs(q))/Abs(q), 0), S.Reals)
    # solveset cannot solve this symbolically; it returns a ConditionSet.
    assert solveset_real(eq, q) == unsolved_object
def test_solve_polynomial_symbolic_param():
    # Quartic in x with a symbolic parameter a.
    assert solveset_complex((x**2 - 1)**2 - a, x) == \
        FiniteSet(sqrt(1 + sqrt(a)), -sqrt(1 + sqrt(a)),
                  sqrt(1 - sqrt(a)), -sqrt(1 - sqrt(a)))
    # issue 4507
    assert solveset_complex(y - b/(1 + a*x), x) == \
        FiniteSet((b/y - 1)/a) - FiniteSet(-1/a)
    # issue 4508
    assert solveset_complex(y - b*x/(a + x), x) == \
        FiniteSet(-a*y/(y - b)) - FiniteSet(-a)
def test_solve_rational():
    # Rational functions and expressions with poles.
    assert solveset_real(1/x + 1, x) == FiniteSet(-S.One)
    assert solveset_real(1/exp(x) - 1, x) == FiniteSet(0)
    assert solveset_real(x*(1 - 5/x), x) == FiniteSet(5)
    assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
    assert solveset_real((x**2/(7 - x)).diff(x), x) == \
        FiniteSet(S(0), S(14))
def test_solveset_real_gen_is_pow():
    # sqrt(1) + 1 == 2 is a nonzero constant: no solutions.
    assert solveset_real(sqrt(1) + 1, x) == EmptySet()
def test_no_sol():
    # Equations with no real solutions.
    assert solveset_real(4, x) == EmptySet()
    assert solveset_real(exp(x), x) == EmptySet()
    assert solveset_real(x**2 + 1, x) == EmptySet()
    assert solveset_real(-3*a/sqrt(x), x) == EmptySet()
    assert solveset_real(1/x, x) == EmptySet()
    assert solveset_real(-(1 + x)/(2 + x)**2 + 1/(2 + x), x) == \
        EmptySet()
def test_sol_zero_real():
    # An identically-zero expression is satisfied on the whole domain.
    assert solveset_real(0, x) == S.Reals
    assert solveset(0, x, Interval(1, 2)) == Interval(1, 2)
    assert solveset_real(-x**2 - 2*x + (x + 1)**2 - 1, x) == S.Reals
def test_no_sol_rational_extragenous():
    # Candidate roots turn out extraneous in the rational form.
    assert solveset_real((x/(x + 1) + 3)**(-2), x) == EmptySet()
    assert solveset_real((x - 1)/(1 + 1/(x - 1)), x) == EmptySet()
def test_solve_polynomial_cv_1a():
    """
    Test for solving on equations that can be converted to
    a polynomial equation using the change of variable y -> x**Rational(p, q)
    """
    assert solveset_real(sqrt(x) - 1, x) == FiniteSet(1)
    assert solveset_real(sqrt(x) - 2, x) == FiniteSet(4)
    assert solveset_real(x**Rational(1, 4) - 2, x) == FiniteSet(16)
    assert solveset_real(x**Rational(1, 3) - 3, x) == FiniteSet(27)
    assert solveset_real(x*(x**(S(1) / 3) - 3), x) == \
        FiniteSet(S(0), S(27))
def test_solveset_real_rational():
    """Test solveset_real for rational functions"""
    assert solveset_real((x - y**3) / ((y**2)*sqrt(1 - y**2)), x) \
        == FiniteSet(y**3)
    # issue 4486
    assert solveset_real(2*x/(x + 2) - 1, x) == FiniteSet(2)
def test_solveset_real_log():
    # log((x-1)(x+1)) = 0  =>  x**2 - 1 = 1  =>  x = +/- sqrt(2).
    assert solveset_real(log((x-1)*(x+1)), x) == \
        FiniteSet(sqrt(2), -sqrt(2))
def test_poly_gens():
    # Exponential equation reducible to a polynomial in a power generator.
    assert solveset_real(4**(2*(x**2) + 2*x) - 8, x) == \
        FiniteSet(-Rational(3, 2), S.Half)
@XFAIL
def test_uselogcombine_1():
    """Log equations that need logcombine support (expected failure)."""
    assert solveset_real(log(x - 3) + log(x + 3), x) == \
        FiniteSet(sqrt(10))
    assert solveset_real(log(x + 1) - log(2*x - 1), x) == FiniteSet(2)
    # BUGFIX: the symbol argument was missing from this call, so it raised
    # TypeError instead of exercising the solver.
    assert solveset_real(log(x + 3) + log(1 + 3/x) - 3, x) == FiniteSet(
        -3 + sqrt(-12 + exp(3))*exp(S(3)/2)/2 + exp(3)/2,
        -sqrt(-12 + exp(3))*exp(S(3)/2)/2 - 3 + exp(3)/2)
@XFAIL
def test_uselogcombine_2():
    # Needs logcombine support in solveset (expected failure).
    eq = z - log(x) + log(y/(x*(-1 + y**2/x**2)))
    assert solveset_real(eq, x) == \
        FiniteSet(-sqrt(y*(y - exp(z))), sqrt(y*(y - exp(z))))
def test_solve_abs():
assert solveset_ | real(Abs(x) - 2, x) == FiniteSet(-2, 2)
assert solveset_real(Abs(x + 3) - 2*Abs(x - 3), x) == \
FiniteSet(1, 9)
assert solveset_real(2*Abs(x) - Abs(x - 1), x) == \
FiniteSet(-1, Rational(1, 3))
assert solveset_real(Abs(x - 7) - 8, x) == FiniteSet(-S(1), S(15))
# issue 9565
assert solveset_real(Abs((x - 1)/(x - 5)) <= S(1)/3, x) == Interval(-1, 2)
# issue #10069
eq = abs(1/(x - 1)) - 1 > 0
u = Union(Interval.open(0, 1), Interval.open(1, 2))
assert solveset_real(e |
xception):
"""Raised when a node fails to start correctly."""
class ErrorMatch(Enum):
    """How an expected error message is matched against node output."""
    FULL_TEXT = 1      # match the full text exactly
    FULL_REGEX = 2     # regex must match the whole text
    PARTIAL_REGEX = 3  # regex may match a substring
class TestNode():
"""A class for representing a bitcoind node under test.
This class contains:
- state about the node (whether it's running, etc)
- a Python subprocess.Popen object representing the running process
- an RPC connection to the node
- one or more P2P connections to the node
To make things easier for the test writer, any unrecognised messages will
be dispatched to the RPC connection."""
    def __init__(self, i, datadir, *, chain, rpchost, timewait, bitcoind, bitcoin_cli, coverage_dir, cwd, extra_conf=None, extra_args=None, use_cli=False, start_perf=False, use_valgrind=False, version=None, block_fork_1_0=0):
        """
        Kwargs:
            start_perf (bool): If True, begin profiling the node with `perf` as soon as
                the node starts.
        """
        # Per-node identity and on-disk layout.
        self.index = i
        self.datadir = datadir
        self.bitcoinconf = os.path.join(self.datadir, "bitcoin.conf")
        self.stdout_dir = os.path.join(self.datadir, "stdout")
        self.stderr_dir = os.path.join(self.datadir, "stderr")
        self.chain = chain
        self.rpchost = rpchost
        self.rpc_timeout = timewait
        self.binary = bitcoind
        self.coverage_dir = coverage_dir
        self.cwd = cwd
        if extra_conf is not None:
            append_config(datadir, extra_conf)
        # Most callers will just need to add extra args to the standard list below.
        # For those callers that need more flexibility, they can just set the args property directly.
        # Note that common args are set in the config file (see initialize_datadir)
        self.extra_args = extra_args
        self.version = version
        # Configuration for logging is set as command-line args rather than in the bitcoin.conf file.
        # This means that starting a bitcoind using the temp dir to debug a failed test won't
        # spam debug.log.
        print("Block fork "+str(block_fork_1_0))
        self.args = [
            self.binary,
            "-datadir=" + self.datadir,
            "-logtimemicros",
            "-debug",
            "-debugexclude=libevent",
            "-debugexclude=leveldb",
            "-uacomment=testnode%d" % i,
            "-testnetNewLogicBlockNumber="+str(block_fork_1_0)
        ]
        if use_valgrind:
            # Wrap the binary in valgrind, honouring an optional
            # user-supplied suppressions file via the environment.
            default_suppressions_file = os.path.join(
                os.path.dirname(os.path.realpath(__file__)),
                "..", "..", "..", "contrib", "valgrind.supp")
            suppressions_file = os.getenv("VALGRIND_SUPPRESSIONS_FILE",
                                          default_suppressions_file)
            self.args = ["valgrind", "--suppressions={}".format(suppressions_file),
                         "--gen-suppressions=all", "--exit-on-first-error=yes",
                         "--error-exitcode=1", "--quiet"] + self.args
        # -logthreadnames only exists on newer node versions.
        if self.version is None or self.version >= 190000:
            self.args.append("-logthreadnames")
        self.cli = TestNodeCLI(bitcoin_cli, self.datadir)
        self.use_cli = use_cli
        self.start_perf = start_perf
        # Runtime state; populated by start() / the RPC connection setup.
        self.running = False
        self.process = None
        self.rpc_connected = False
        self.rpc = None
        self.url = None
        self.log = logging.getLogger('TestFramework.node%d' % i)
        self.cleanup_on_exit = True # Whether to kill the node when this object goes away
        # Cache perf subprocesses here by their data output filename.
        self.perf_subprocesses = {}
        self.p2ps = []
AddressKeyPair = collections.namedtuple('AddressKeyPair', ['address', 'key'])
PRIV_KEYS = [
# address , privkey
AddressKeyPair('Y8g8qJKQBvzaQLzyBfwzq3ochnDf2yJN4W', 'XLhuyJ8A62v5mqpzxRtUjLNk1 | jasQ88hKhcLo5UebwNCE13Eh4o6'),
AddressKeyPair('YBFPVzbEXB8pKodjyk18CcYHLzTRq8QNrL', 'XSUubLFLG5vL3h3QMLY93fRnLn2tYwomFUYhJh7RwkAUsxSwpvaH'),
AddressKeyPair('YHjuPreds6a2UbztowqctHy92iEbd34rXR', 'XLrbkSZbFCU8z2Y3QUPcgH8TvkgN4f6iva2bVET4XKRgt8Z9iubQ'),
AddressKeyPair('YLUcyhXWz8JbXekmKajg2vZUg1uweW6pWn', 'XRxAth6ux7KQiJ4YY95qms9o7mKsK5ad9iJzWjAMeg8kL1ZyAUUH'),
AddressKeyPair('YF3pqbSzCJF3CQ9jXFMmNDpqj5NENFAw5Q', 'XLwhJc7aVHY | jPzHMuYtD311p9n7krdAwPCiLgyDZSnMQkZsDD5Ed'),
AddressKeyPair('Y4s27oqeta92QGJsRmHLdjBQAQ8256EpWW', 'XLtYHAyoFtu2gu7MQuAEDH8cU8uVQbJkMpgYZKRSnCJbL1aUzKaZ'),
AddressKeyPair('Y8cp27tChD8bJVG7Dtrf1aDeQRLLa1vErv','XQqkHTd8VQe2eMB2oA8obx9rWLfZicWJirT6xVAdGJ9nWgwDEP89'),
AddressKeyPair('Y8WHgiWoKBZxboFVLk1VpPs9u76xiSmD4g', 'XTDroJ2meDFXLj4ERzYmv1PpZDduKMXj4GgNgX1MR9BmRkWnqeUD'),
AddressKeyPair('YF1u3m8z7nn9RStw7XBfHH35GpwYf4wpx9', 'XPrvw7w2QeGMsY19mXdSMHuidZnMcewc38FMK28ua6id3cuQCB4K'),
AddressKeyPair('Y8RMtv2rPfNj3JeUq5Zb6xxvUeyXULz8ki', 'XPayZe8BXgKqarj3ZPRZmtcyqdkMPCpZR6bnF2EnJ1NdyivjTWU9'),
AddressKeyPair('YHU8rSKMYsHKv9JPiuzGD5tabVN1zxfrhy', 'XQC4xmMd8S1tNwKpMcKb2kjotURFtnVUsauNB6eChvsrRkUZKGFM'),
AddressKeyPair('YFzYDbzCkNbViCjkYJ2i9pzfhWpKMQzhcb', 'XMJ75XrddqxpyGeUrsVZs7PbVYKN2s7y5oRNzRY9v2C6S36f58Kd')
# AddressKeyPair('YCYpkfgECJsUSH42FzTgpbjWRqf1E6oRcK', 'XSs57SNVPmiNMmNJx3hRdMycTPhZHnocYLqxWYpFsAaLZUgtXooY')
]
def get_deterministic_priv_key(self):
"""Return a deterministic priv key in base58, that only depends on the node's index"""
assert len(self.PRIV_KEYS) == MAX_NODES
return self.PRIV_KEYS[self.index]
def _node_msg(self, msg: str) -> str:
"""Return a modified msg that identifies this node by its index as a debugging aid."""
return "[node %d] %s" % (self.index, msg)
def _raise_assertion_error(self, msg: str):
"""Raise an AssertionError with msg modified to identify this node."""
raise AssertionError(self._node_msg(msg))
    def __del__(self):
        """Destructor: last-resort cleanup of a leftover child process."""
        # Ensure that we don't leave any bitcoind processes lying around after
        # the test ends
        if self.process and self.cleanup_on_exit:
            # Should only happen on test failure
            # Avoid using logger, as that may have already been shutdown when
            # this destructor is called.
            print(self._node_msg("Cleaning up leftover process"))
            self.process.kill()
def __getattr__(self, name):
"""Dispatches any unrecognised messages to the RPC connection or a CLI instance."""
if self.use_cli:
return getattr(self.cli, name)
else:
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(self.rpc, name)
    def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
        """Start the node."""
        if extra_args is None:
            extra_args = self.extra_args
        # Add a new stdout and stderr file each time bitcoind is started
        if stderr is None:
            stderr = tempfile.NamedTemporaryFile(dir=self.stderr_dir, delete=False)
        if stdout is None:
            stdout = tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False)
        self.stderr = stderr
        self.stdout = stdout
        if cwd is None:
            cwd = self.cwd
        # Delete any existing cookie file -- if such a file exists (eg due to
        # unclean shutdown), it will get overwritten anyway by bitcoind, and
        # potentially interfere with our attempt to authenticate
        delete_cookie_file(self.datadir, self.chain)
        # add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
        subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
        self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
        self.running = True
        self.log.debug("bitcoind started, waiting for RPC to come up")
        # Optionally attach `perf` profiling to the freshly started node.
        if self.start_perf:
            self._start_perf()
def wait_for_rpc_connection(self):
|
rt db_util
from meerkat_api.resources import locations
from meerkat_api.test.test_data.locations import DEVICE_IDS_CSV_LIST, DEVICEID_1, DEVICE_IDS_IMEI_CSV_LIST, \
LOCATION_NUMBER
from . import settings
class MeerkatAPILocationTestCase(unittest.TestCase):
    def setUp(self):
        """Setup for testing"""
        # Run the Flask app in testing mode with API-key auth disabled.
        meerkat_api.app.config['TESTING'] = True
        meerkat_api.app.config['API_KEY'] = ""
        self.app = meerkat_api.app.test_client()
        meerkat_api.app.app_context().push()
        session = db_util.session
        # Seed the test database with reference codes and locations.
        db_util.insert_codes(session)
        db_util.insert_locations(session)
    def tearDown(self):
        # No per-test cleanup needed; the database is re-seeded in setUp.
        pass
    def test_locations(self):
        """Check locations"""
        rv = self.app.get('/locations', headers=settings.header)
        self.assertEqual(rv.status_code, 200)
        data = json.loads(rv.data.decode("utf-8"))
        self.assertEqual(len(data), LOCATION_NUMBER)
        # Location ids are serialised as string keys "1".."LOCATION_NUMBER".
        self.assertEqual(set(data.keys()), set([repr(x) for x in range(1, LOCATION_NUMBER + 1)]))
        self.assertEqual(data["11"]["name"], "Clinic 5")
        self.assertEqual(data["11"]["parent_location"], 6)
        self.assertEqual(data["5"]["name"], "District 2")
def test_location(s | elf):
"""Check location"""
rv = self.app.get('/location/11', headers=settings.header)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["name"], "Clinic 5")
self.assertEqual(data["parent_location"], 6)
rv = self.app.get('/location/7', headers=settings.header)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["name"], "Clinic 1")
self.assertEqual(data["parent_location"], 4)
    def test_tot_clinics(self):
        """Check tot_clinics"""
        # Clinic totals per root location id.
        rv = self.app.get('/tot_clinics/1', headers=settings.header)
        data = json.loads(rv.data.decode("utf-8"))
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(data["total"], 4)
        rv = self.app.get('/tot_clinics/2', headers=settings.header)
        data = json.loads(rv.data.decode("utf-8"))
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(data["total"], 3)
        rv = self.app.get('/tot_clinics/3', headers=settings.header)
        data = json.loads(rv.data.decode("utf-8"))
        self.assertEqual(rv.status_code, 200)
        self.assertEqual(data["total"], 1)
        # With clinic type
        tot_clinic = locations.TotClinics()
        data = tot_clinic.get(1, "SARI")
        self.assertEqual(data["total"], 2)
        data = tot_clinic.get(1, "Refugee")
        self.assertEqual(data["total"], 2)
def test_location_tree(self):
""" Test the location tree """
rv = self.app.get('/locationtree', headers=settings.header)
self.assertEqual(rv.status_code, 200)
data = json.loads(rv.data.decode("utf-8"))
self.assertEqual(data["text"], "Demo")
nodes = data["nodes"]
ids = []
for n in nodes:
ids.append(n["id"])
self.assertIn(2, ids)
self.assertIn(3, ids)
self.assertNotIn(4, ids)
self.assertNotIn(5, ids)
district_level = nodes[0]["nodes"]
ids = []
for n in district_level:
ids.append(n["id"])
self.assertIn(4, ids)
self.assertIn(5, ids)
self.assertNotIn(6, ids)
clinic_level = district_level[0]["nodes"]
ids = []
for n in clinic_level:
ids.append(n["id"])
self.assertIn(7, ids)
self.assertIn(8, ids)
self.assertNotIn(9, ids)
self.assertNotIn(10, ids)
self.assertNotIn(11, ids)
# Test location tree filtering functionality
# A utility function to recursively get the clinics out of the tree
def get_clinics(tree):
children = []
if tree['nodes']:
for child in tree['nodes']:
children += get_clinics(child)
if not child['nodes']:
children += [child['text']]
return children
# Test inc functionality
rv = self.app.get(
'/locationtree?inc_case_types=["pip"]',
headers=settings.header
)
clinics = get_clinics(json.loads(rv.data.decode("utf-8")))
print('/locationtree?inc_case_types=["pip"]')
print(json.loads(rv.data.decode("utf-8")))
print(clinics)
self.assertEqual(rv.status_code, 200)
self.assertIn('Clinic 2', clinics)
self.assertIn('Clinic 4', clinics)
self.assertIn('Clinic 5', clinics)
self.assertEqual(len(clinics), 3)
rv = self.app.get(
'/locationtree?inc_case_types=["pip","mh"]',
headers=settings.header
)
clinics = get_clinics(json.loads(rv.data.decode("utf-8")))
print('/locationtree?inc_case_types=["pip", "mh"]')
print(json.loads(rv.data.decode("utf-8")))
print(clinics)
self.assertEqual(rv.status_code, 200)
self.assertIn('Clinic 2', clinics)
self.assertIn('Clinic 1', clinics)
self.assertIn('Clinic 4', clinics)
self.assertIn('Clinic 5', clinics)
self.assertEqual(len(clinics), 4)
# Test exc functionality
rv = self.app.get(
'/locationtree?exc_case_types=["pip"]',
headers=settings.header
)
clinics = get_clinics(json.loads(rv.data.decode("utf-8")))
print('/locationtree?exc_case_types=["pip"]')
print(json.loads(rv.data.decode("utf-8")))
print(clinics)
self.assertEqual(rv.status_code, 200)
self.assertIn('Clinic 1', clinics)
self.assertIn('Clinic 4', clinics)
self.assertIn('Clinic 5', clinics)
self.assertEqual(len(clinics), 3)
rv = self.app.get(
'/locationtree?exc_case_types=["pip", "mh"]',
headers=settings.header
)
clinics = get_clinics(json.loads(rv.data.decode("utf-8")))
print('/locationtree?exc_case_types=["pip", "mh"]')
print(json.loads(rv.data.decode("utf-8")))
print(clinics)
self.assertEqual(rv.status_code, 200)
self.assertIn('Clinic 4', clinics)
self.assertEqual(len(clinics), 1)
# Test both inc and exc functionality
rv = self.app.get(
'/locationtree?inc_case_types=["mh"]&exc_case_types=["pip","mh"]',
headers=settings.header
)
clinics = get_clinics(json.loads(rv.data.decode("utf-8")))
print(
'/locationtree?inc_case_types=["mh"]&exc_case_types=["pip","mh"]'
)
print(json.loads(rv.data.decode("utf-8")))
print(clinics)
self.assertEqual(rv.status_code, 200)
self.assertIn('Clinic 4', clinics)
self.assertEqual(len(clinics), 1)
    def test_location_by_non_existing_device_id(self):
        # Unknown device ids should yield an empty JSON object, not an error.
        # NOTE(review): the loop variable shadows the builtin `id`.
        for id in ["42", "fake_device_id", DEVICEID_1[1:]]:
            rv = self.app.get('locations?deviceId={}'.format(id), headers=settings.header)
            self.assertEqual(rv.status_code, 200)
            actual_response_json = json.loads(rv.data.decode('utf-8'))
            empty_json = {}
            self.assertEqual(actual_response_json, empty_json)
def test_location_by_device_id(self):
for id in DEVICE_IDS_CSV_LIST.split(','):
self.validate_correct_location_returned(deviceid=id, expected_loc_id='12')
def test_location_by_device_id_imei_format(self):
for id in DEVICE_IDS_IMEI_CSV_LIST.split(','):
self.validate_correct_location_returned(deviceid=id, expected_loc_id='13')
def validate_correct_location_returned(self, deviceid=None, expected_loc_id=None):
rv = self.app.get('/locations?deviceId={}'.format(deviceid), headers=settings.header)
self.assertEqual(rv.status_code, 200)
actual_json_response = json.loads(rv.data.decode('utf-8'))
self.assertTrue(expected_loc_id in actual_json_response)
actual_loc_id = actual_json_response[expected_l |
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
##### ##### ===== 포함 파일 =====
# 개인적인 아이디, 비밀번호 파일.
from personal.jconfig import LOGIN_ID, LOGIN_PW
# scrapy item 파일.
from joonggonara.items import JoonggonaraItem
# 로그인을 위한 FormRequest.
# 로그인 이후 크롤링을 위한 Request.
from scrapy.http import FormRequest, Request
# 게시판 페이지에서 각 게시글 url을 얻어온 후 url을 Spider에 넣어주기 위한 urljoin.
from urlparse import urljoin
# scrapy를 사용하기 위한 scrapy.
import scrapy
# response에서 ArticleNumber를 얻어내기위한 re.
import re
# file의 존재유무 체크를 위한 os.path
import os.path
# 랜덤 sle | ep을 위한 time, random
import time
import random
# Database를 위한 sqlite3
import sqlite3
##### ##### ===== 포함 파일 끝 =====
##### ##### ===== global variables =====
CRAWL_TARGET = 0      # total number of articles to collect
CRAWL_COUNT = 0       # number of articles queued so far
MAX_PAGE = 0          # last board page to scan
DOWNLOAD_DELAY = 2    # seconds between requests
conn = None           # sqlite3 connection (opened after login)
cur = None            # sqlite3 cursor
##### ##### ===== end of global variables =====
##### ##### ===== per-project variables =====
# Primary settings
SPIDER_NAME = 'lgt'
START_URL = 'http://nid.naver.com/nidlogin.login'
BOARD_PAGE_URL = 'http://cafe.naver.com/ArticleList.nhn?search.boardtype=L&userDisplay=50&search.menuid=425&search.questionTab=A&search.clubid=10050146&search.specialmenutype=&search.totalCount=501&search.page=' # SKT - 339, KT - 424, LGT - 425, women's tops (fup) - 356, men's tops (mup) - 358
ARTICLE_URL = 'http://cafe.naver.com/ArticleRead.nhn?clubid=10050146&page=1&menuid=425&boardtype=L&articleid='
DATABASE_NAME = 'joonggonara.sqlite'
LIST_DB = 'list_lgt'                  # table: articles queued for download
DOWNLOADED_DB = 'downloaded_lgt'      # table: articles already downloaded
# Working files
TARGET_FILE = 'target_lgt.txt'        # number of articles to collect
MAX_FILE = 'max_lgt.txt'              # last board page to scan
LOGIN_FILE = 'output/login_lgt.html'  # login response dump (for debugging)
ARTICLE_AHREF = '//a[contains(@href, "articleid") and not(contains(@href, "specialmenutype"))]/@href'
SAVE_LOCATION = 'output/lgt/'         # where article HTML files are saved
##### ##### ===== end of per-project variables =====
##### ##### ===== class declarations =====
##### ----- -----
##### Joonggonara spider class
##### ----- -----
class Spider(scrapy.Spider):
    """Naver-cafe (Joonggonara) spider: logs in, walks board list pages,
    queues article ids in SQLite, then downloads each article's raw HTML.
    NOTE(review): Python 2 code (print statements, urlparse import)."""
    name = SPIDER_NAME
    global CRAWL_TARGET
    global CRAWL_COUNT
    global MAX_PAGE
    global conn
    global cur
    # Request delay setting.
    download_delay = DOWNLOAD_DELAY
    # Crawling must start from the login page, since login is required.
    start_urls = [
        START_URL
    ]
    # Read the collection target from files, so the amount collected can be
    # tuned without editing the source.
    target_file = open(TARGET_FILE, 'r')
    CRAWL_TARGET = int(target_file.readline())
    target_file.close()
    max_file = open(MAX_FILE, 'r')
    MAX_PAGE = int(max_file.readline())
    max_file.close()
    # Perform the login.
    def parse(self, response):
        # Fill the login form with id and pw; scrapy manages the session
        # cookies automatically afterwards.
        return scrapy.FormRequest.from_response(
            response,
            formname='frmNIDLogin',
            formdata={'id': LOGIN_ID, 'pw': LOGIN_PW},
            clickdata={'nr': 0},
            callback=self.after_login
        )
    # After login, fetch the board list to harvest per-article URLs.
    def after_login(self, response):
        # Pull in module-level state.
        global CRAWL_TARGET
        global CRAWL_COUNT
        global MAX_PAGE
        global conn
        global cur
        # Dump the login response for debugging.
        with open(LOGIN_FILE, 'wb') as f:
            f.write(response.body)
            f.close()
        # Create Database Connector
        conn = sqlite3.connect(DATABASE_NAME)
        # Create Database Cursor
        cur = conn.cursor()
        # Create Table
        cur.executescript('''
        CREATE TABLE IF NOT EXISTS ''' + LIST_DB + ''' (
            article_num INTEGER PRIMARY KEY NOT NULL UNIQUE);
        ''' +
        '''
        CREATE TABLE IF NOT EXISTS ''' + DOWNLOADED_DB + ''' (
            article_num INTEGER PRIMARY KEY NOT NULL UNIQUE);
        '''
        )
        conn.commit()
        # Resume support: count entries queued during a previous run.
        cur.execute('''
        SELECT COUNT(*) FROM ''' + LIST_DB
        )
        CRAWL_COUNT = CRAWL_COUNT + int(cur.fetchone()[0])
        # After a successful login, scrape article URLs from the board.
        return Request(url=BOARD_PAGE_URL + str(1), callback=self.parse_list)
    # Parse article URLs (excluding notices) from a board list page.
    def parse_list(self, response):
        # Pull in module-level state.
        global CRAWL_TARGET
        global CRAWL_COUNT
        global MAX_PAGE
        global conn
        global cur
        # Walk the user-written article links on the page.
        for ahref in response.xpath(ARTICLE_AHREF).extract():
            # Stop once the collection target has been reached.
            if CRAWL_COUNT >= CRAWL_TARGET:
                break
            # Parse the article number out of the href query string.
            # NOTE(review): SQL below is built by string concatenation;
            # article_num comes from a regex split of a cafe URL, but a
            # parameterised query would still be safer.
            article_num = re.split(r'[?=&]', ahref)[12]
            # Skip articles that were already downloaded.
            cur.execute('SELECT * FROM ' + DOWNLOADED_DB + ' WHERE article_num = ' + str(article_num)
            )
            if cur.fetchone() is not None:
                print 'tartget skip: ' + str(article_num)
                continue
            # Queue the article for download.
            cur.execute('INSERT OR IGNORE INTO ' + LIST_DB + ' (article_num) VALUES (' + str(article_num) + ')'
            )
            conn.commit()
            CRAWL_COUNT = CRAWL_COUNT + 1
        # Check whether the list has been filled to the target count.
        page_num = int(re.split(r'[=]', response.url)[8])
        if ((CRAWL_COUNT >= CRAWL_TARGET) or (page_num >= MAX_PAGE)):
            return self.crawl_article()
        else:
            # Target not reached yet: fetch the next board page.
            next_url = BOARD_PAGE_URL + str(page_num+1)
            return Request(url=next_url, callback=self.parse_list)
    # Download the queued articles.
    def crawl_article(self):
        # Pull in module-level state.
        global CRAWL_TARGET
        global CRAWL_COUNT
        global MAX_PAGE
        global conn
        global cur
        # Load the download-target list.
        # Note: because Requests are yielded, cur may be used concurrently,
        # so all rows must be fetched up front with fetchall().
        cur.execute('SELECT * FROM ' + LIST_DB)
        target_list = cur.fetchall()
        # Send the Requests.
        for data in target_list:
            # Assemble the request URL.
            article_num = data[0]
            request_url = ARTICLE_URL + str(article_num)
            # Remove from the download-target list before requesting.
            cur.execute('DELETE FROM ' + LIST_DB + ' WHERE article_num = ' + str(article_num)
            )
            conn.commit()
            # Random sleep between requests.
            time.sleep(random.randint(0, 1))
            # Send the request.
            yield Request(request_url, callback = self.parse_article)
    # Save each downloaded article's raw body.
    def parse_article(self, response):
        # Pull in module-level state.
        global CRAWL_TARGET
        global CRAWL_COUNT
        global MAX_PAGE
        global conn
        global cur
        # Record the article in the downloaded list.
        article_num = re.split(r'[?=&]', response.url)[10]
        cur.execute('INSERT OR IGNORE INTO ' + DOWNLOADED_DB + ' (article_num) VALUES (' + str(article_num) + ')'
        )
        conn.commit()
        # Save the article body to a file.
        with open(SAVE_LOCATION + article_num + '.html', 'wb') as f:
            f.write(response.body)
            f.close()
##### ##### ===== end of class declarations =====
|
# generated from catkin/cmake/template/pkg.context.pc.in
# Auto-generated pkg-config context for the catkin build system.
# The `"" != ""` conditionals are template artifacts: empty substitutions
# produce empty lists at generation time.
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "".split(';') if "" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "".split(';') if "" != "" else []
PROJECT_NAME = "laser_scan_publisher_tutorial"
PROJECT_SPACE_DIR = "/home/robot/bebop_ws/devel"
PROJECT_VERSION = "0.2.1"
|
objects pertaining to those
bundles.
This needs to be implemented at the user level. No exceptions should
be raised if possible.
``ModelResource`` includes a full working version specific to Django's
``Models``.
"""
raise NotImplementedError()
    # Views.
    def get_list(self, request, **kwargs):
        """
        Returns a serialized list of resources.
        Calls ``obj_get_list`` to provide the data, then handles that result
        set and serializes it.
        Should return a HttpResponse (200 OK).
        """
        # TODO: Uncached for now. Invalidation that works for everyone may be
        # impossible.
        base_bundle = self.build_bundle(request=request)
        objects = self.obj_get_list(bundle=base_bundle, **self.remove_api_resource_names(kwargs))
        # Sort first, then paginate, so page slices are stable.
        sorted_objects = self.apply_sorting(objects, options=request.GET)
        paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)
        to_be_serialized = paginator.page()
        # Dehydrate the bundles in preparation for serialization.
        bundles = []
        for obj in to_be_serialized[self._meta.collection_name]:
            bundle = self.build_bundle(obj=obj, request=request)
            bundles.append(self.full_dehydrate(bundle, for_list=True))
        to_be_serialized[self._meta.collection_name] = bundles
        to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
        return self.create_response(request, to_be_serialized)
def get_detail(self, request, **kwargs):
"""
Returns a single serialized resource.
Calls ``cached_obj_get/obj_get`` to provide the data, then handles that result
set and serializes it.
Should return a HttpResponse (200 OK).
"""
basic_bundle = self.build_bundle(reques | t=request)
try:
obj = self.cached_obj_get(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
except ObjectDoesNotExist:
return http.HttpNotFound()
except MultipleObjectsReturned:
return http.HttpMultipleChoices("More than one resource is found at this URI.")
bundle = self.build_bundle(obj=obj, request=request)
bundle = self.full_dehydrate(bundle)
bundle = self.alter_detail_data_to_serialize(request, bundle)
return self.create_response(request, bundle)
def post_list(self, request, **kwargs):
"""
Creates a new resource/object with the provided data.
Calls ``obj_create`` with the provided data and returns a response
with the new resource's location.
If a new resource is created, return ``HttpCreated`` (201 Created).
If ``Meta.always_return_data = True``, there will be a populated body
of serialized data.
"""
if django.VERSION >= (1, 4):
body = request.body
else:
body = request.raw_post_data
deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
updated_bundle = self.obj_create(bundle, **self.remove_api_resource_names(kwargs))
location = self.get_resource_uri(updated_bundle)
if not self._meta.always_return_data:
return http.HttpCreated(location=location)
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
    def post_detail(self, request, **kwargs):
        """
        Creates a new subcollection of the resource under a resource.

        This is not implemented by default because most people's data models
        aren't self-referential.

        If a new resource is created, return ``HttpCreated`` (201 Created).
        """
        # Deliberately unsupported here; subclasses with self-referential
        # models may override this to create nested resources.
        return http.HttpNotImplemented()
def put_list(self, request, **kwargs):
"""
Replaces a collection of resources with another collection.
Calls ``delete_list`` to clear out the collection then ``obj_create``
with the provided the data to create the new collection.
Return ``HttpNoContent`` (204 No Content) if
``Meta.always_return_data = False`` (default).
Return ``HttpAccepted`` (202 Accepted) if
``Meta.always_return_data = True``.
"""
if django.VERSION >= (1, 4):
body = request.body
else:
body = request.raw_post_data
deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_list_data(request, deserialized)
if not self._meta.collection_name in deserialized:
raise BadRequest("Invalid data sent.")
basic_bundle = self.build_bundle(request=request)
self.obj_delete_list_for_update(bundle=basic_bundle, **self.remove_api_resource_names(kwargs))
bundles_seen = []
for object_data in deserialized[self._meta.collection_name]:
bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request)
# Attempt to be transactional, deleting any previously created
# objects if validation fails.
try:
self.obj_create(bundle=bundle, **self.remove_api_resource_names(kwargs))
bundles_seen.append(bundle)
except ImmediateHttpResponse:
self.rollback(bundles_seen)
raise
if not self._meta.always_return_data:
return http.HttpNoContent()
else:
to_be_serialized = {}
to_be_serialized[self._meta.collection_name] = [self.full_dehydrate(bundle, for_list=True) for bundle in bundles_seen]
to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
def put_detail(self, request, **kwargs):
"""
Either updates an existing resource or creates a new one with the
provided data.
Calls ``obj_update`` with the provided data first, but falls back to
``obj_create`` if the object does not already exist.
If a new resource is created, return ``HttpCreated`` (201 Created).
If ``Meta.always_return_data = True``, there will be a populated body
of serialized data.
If an existing resource is modified and
``Meta.always_return_data = False`` (default), return ``HttpNoContent``
(204 No Content).
If an existing resource is modified and
``Meta.always_return_data = True``, return ``HttpAccepted`` (202
Accepted).
"""
if django.VERSION >= (1, 4):
body = request.body
else:
body = request.raw_post_data
deserialized = self.deserialize(request, body, format=request.META.get('CONTENT_TYPE', 'application/json'))
deserialized = self.alter_deserialized_detail_data(request, deserialized)
bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
try:
updated_bundle = self.obj_update(bundle=bundle, **self.remove_api_resource_names(kwargs))
if not self._meta.always_return_data:
return http.HttpNoContent()
else:
updated_bundle = self.full_dehydrate(updated_bundle)
updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
return self.create_response(request, updated_bundle, response_class=http.HttpAccepted)
except (NotFound, MultipleObje |
# -*- encoding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in root directory
##############################################################################
from openerp import fields, models, api
import logging
_logger = logging.getLogger(__name__)
class account_move(models.Model):
    # Extends the core account.move model with a per-company book number.
    _inherit = 'account.move'
    number_in_book = fields.Char(
        string='Number in Book',
        help='This number is set when closing a period or by running a wizard'
    )
    # Enforced at the database level: a book number may appear at most once
    # per company.
    _sql_constraints = [
        ('number_in_book_uniq', 'unique(number_in_book, company_id)',
         'Number in Book must be unique per Company!')]
    @api.multi
    def moves_renumber(self, sequence):
        # Assign each move in the recordset the next number from *sequence*,
        # evaluated within the move's own fiscal year context.
        _logger.info("Renumbering %d account moves.", len(self.ids))
        for move in self:
            new_number = sequence.with_context(
                fiscalyear_id=move.period_id.fiscalyear_id.id)._next()
            move.number_in_book = new_number
|
import xml.etree.ElementTree as et
import os, time
from xml.etree.ElementTree import Element
from PyQt4 import QtCore, QtGui
class Helper():
    """Thin wrapper around the application's XML data file (./data/data.xml).

    Provides read helpers for employees/statuses and mutating helpers that
    persist the tree back to disk after every change.
    """
    def __init__(self):
        # Parse the data file once; all methods operate on this tree.
        self.tree = et.parse('./data/data.xml')
        self.root = self.tree.getroot()

    def write(self, filename):
        """Persist the current tree to *filename*."""
        return self.tree.write(filename)

    def updateEmployee(self):
        """Return a list of [name, color] pairs, one per employee record."""
        allfound = []
        for employees in self.root:
            for person in employees:
                # Build the field list per person so a malformed entry can
                # no longer leak its fields into the next person's record.
                fields = [info.text for info in person]
                if len(fields) == 2:
                    allfound.append(fields)
        return allfound

    def updateStatus(self):
        """Return the text of every status under the MODID='2' section."""
        sList = []
        for child in self.root:
            if child.get('MODID') == '2':
                sList.extend(stat.text for stat in child)
        return sList

    def removeEmployee(self, toRemove):
        """Remove every Person with a field equal to *toRemove*, then persist."""
        for employees in self.root:
            # Iterate over a copy: removing from an element while iterating
            # it skips siblings. Break after removal so a person whose name
            # AND color both match is not removed twice (ValueError).
            for person in list(employees):
                for info in person:
                    if info.text == toRemove:
                        employees.remove(person)
                        break
        self.write('./data/data.xml')

    def removeStatus(self, toRemove):
        """Remove every Stat whose text equals *toRemove*, then persist."""
        proj_stats = self.root.find('Proj_statuses')
        # Copy before iterating: removal during iteration skips elements.
        for status in list(proj_stats):
            if status.text == toRemove:
                proj_stats.remove(status)
        self.write('./data/data.xml')

    def addEmployee(self, eName, eColor):
        """Append a Person with the given name and color, then persist."""
        emp = Element('Person')
        name = Element('Name')
        name.text = eName
        color = Element('Color')
        color.text = eColor
        emp.append(name)
        emp.append(color)
        self.root.find('Employees').append(emp)
        self.write('./data/data.xml')

    def addStatus(self, sName):
        """Append a Stat with the given text, then persist."""
        st = Element('Stat')
        st.text = sName
        self.root.find('Proj_statuses').append(st)
        self.write('./data/data.xml')
if __name__ == "__main__":
    # Ad-hoc manual smoke test: read the data file, then append a demo
    # employee and status. NOTE: this mutates ./data/data.xml on disk.
    A = Helper()
    A.updateEmployee()
    A.updateStatus()
    A.addEmployee('Walter', '0x0000FF')
    A.addStatus('Eating')
|
from sofi.ui import TableRow
def test_basic():
    # An empty row renders as a bare <tr> element.
    expected = "<tr></tr>"
    assert str(TableRow()) == expected
def test_text():
    # Inline text content is rendered inside the row unchanged.
    expected = "<tr>text</tr>"
    assert str(TableRow("text")) == expected
def test_custom_class_ident_style_and_attrs():
    # All keyword customisations must appear as attributes on the <tr> tag.
    rendered = str(TableRow("text", cl='abclass', ident='123',
                            style="font-size:0.9em;",
                            attrs={"data-test": 'abc'}))
    assert rendered == ('<tr id="123" class="abclass" '
                        'style="font-size:0.9em;" data-test="abc">text</tr>')
|
""" A simple restful webservice to provide access to the wiki.db"""
import json
from bottle import Bottle, run, response, static_file, redirect
from dbfunctions import Wikidb
api = Bottle()
db = Wikidb()
@api.route('/static/<filepath:path>')
def static(filepath):
    # Serve bundled assets (HTML/JS/CSS) from the ./static directory.
    return static_file(filepath, root='./static')
@api.route('/api/search/<term>')
def search(term):
    """Return JSON search results for *term* from the wiki database."""
    response.headers['Content-Type'] = 'application/json'
    # Results must reflect the live database, so disable client caching.
    response.headers['Cache-Control'] = 'no-cache'
    return json.dumps(db.search(term))
@api.route('/api/detail/<subject>')
def details(subject):
    """Return the JSON detail record for *subject* from the wiki database."""
    response.headers['Content-Type'] = 'application/json'
    # Details must reflect the live database, so disable client caching.
    response.headers['Cache-Control'] = 'no-cache'
    return json.dumps(db.detail(subject))
if __name__ == '__main__':
    # Demonstrates the truly awesome awesomplete drawing data right from the search API above.
    @api.route('/search')
    def autocompletesearch():
        # Redirect to the static demo page hosting the autocomplete widget.
        return redirect('/static/autocomplete.html')
    # Seed the database with a few demo articles before serving.
    db.put('this is an article', 'this is the body of the article.')
    db.put('this is another article', 'this is the body of the article.')
    db.put('this is a third article', 'this is the body of the article.')
    run(api, host='localhost',port=8080, debug=True)
|
import json
import correlation
import category
import tools
import settings
from matplotlib.backends.backend_pdf import PdfPages
def process_data(data_type, stats, highlights):
    """Run the requested statistics over one survey population.

    Keyword arguments:
    data_type -- "student" or "fac_staff"; selects the settings paths used
    stats -- which stats to run: "correlation", "category" or "all"
    highlights -- if True, skip the all-correlations PDF and only produce
        the highlight output
    """
    # Bug fix: the start message previously hard-coded "student" even when
    # processing fac_staff data.
    print("Starting {} data processing.".format(data_type))
    all_pdf_path, highlight_pdf_path = (None, None)
    question_types, demographic_questions, opinion_questions = (None,)*3
    demographic_save_file = None
    # Select the per-population configuration.
    if data_type == "student":
        question_types = settings.student_question_types
        all_pdf_path = settings.student_stats_path
        highlight_pdf_path = settings.student_stats_highlight_path
        demographic_questions = settings.student_demographic_questions
        opinion_questions = settings.student_opinion_questions
        demographic_save_file = settings.student_categories_highlight_path
    elif data_type == "fac_staff":
        question_types = settings.fac_staff_question_types
        all_pdf_path = settings.fac_staff_stats_path
        highlight_pdf_path = settings.fac_staff_stats_highlight_path
        demographic_questions = settings.fac_staff_demographics_questions
        opinion_questions = settings.fac_staff_opinion_questions
        demographic_save_file = settings.fac_staff_categories_highlight_path
    data = load_data(data_type)
    # correlation calculations
    if stats in ["correlation", "all"]:
        correlation_to_run = correlation.gen_num_correlations(data, question_types)
        correlation_results = correlation.run_num_correlations(correlation_to_run, data)
        interesting_correlations = correlation.find_interesting_correlations(
            correlation_results, data)
        correlation.print_interesting_correlations(interesting_correlations, data)
        # plot all correlations (skipped when only highlights are wanted)
        if not highlights:
            all_pdf = PdfPages(all_pdf_path)
            correlation.plot_correlations(correlation_results, data, all_pdf)
            all_pdf.close()
        # plot highlight correlations
        highlight_pdf = PdfPages(highlight_pdf_path)
        correlation.plot_correlations(interesting_correlations, data, highlight_pdf)
        highlight_pdf.close()
        print("Done with {} correlation stats.".format(data_type))
    # category calculations
    if stats in ["category", "all"]:
        print("Starting demographic processing for {} data.".format(data_type))
        base_demographic = category.base_demographic(data, demographic_questions)
        answer_response_lists = category.generate_answer_response_lists(
            data, opinion_questions)
        opinion_demographic_dict = category.generate_demographic_for_response_lists(
            answer_response_lists, data)
        opinion_demographic_diff_dict = category.calc_demographic_diff(
            base_demographic, opinion_demographic_dict)
        interesting_demographic_changes = category.find_interesting_demographic_changes(
            opinion_demographic_diff_dict)
        category.save_interesting_demographics_changes_to_file(
            interesting_demographic_changes, demographic_save_file
        )
    print("Ending {} data processing.".format(data_type))
def load_data(data_type):
    """Load the cleaned survey JSON for *data_type* ("student" or
    "fac_staff") and return the parsed records."""
    print("Loading {} data.".format(data_type))
    data = None
    file_path = None
    # Resolve the cleaned-data path for the requested population.
    if data_type == "student":
        file_path = settings.student_clean_path
    elif data_type == "fac_staff":
        file_path = settings.fac_staff_clean_path
    print("Opening: {}".format(file_path))
    with open(file_path, "r") as f:
        print("Reading JSON into memory.")
        data = json.loads(f.read())
    print("Loaded {} {} records.".format(len(data), data_type))
    print("Done loading {} data.".format(data_type))
    return data
|
"""
The setup package to install MasterQA dependencies
"""
from setuptools import setup, find_packages # noqa
import os
import sys
this_directory = os.path.abspath(os.path.dirname(__file__))
long_description = None
total_description = None
try:
    # Use README.md as the long description, stripping the HTML header tags
    # (<meta ...> / <link ...>) that PyPI cannot render.
    with open(os.path.join(this_directory, 'README.md'), 'rb') as f:
        total_description = f.read().decode('utf-8')
    description_lines = total_description.split('\n')
    long_description_lines = []
    for line in description_lines:
        if not line.startswith("<meta ") and not line.startswith("<link "):
            long_description_lines.append(line)
    long_description = "\n".join(long_description_lines)
except IOError:
    # No README available (e.g. installing from a stripped-down sdist).
    long_description = (
        'Automation-Assisted Manual Testing - https://masterqa.com')
# `python setup.py publish` runs an interactive lint-build-upload pipeline
# instead of a normal setup command, then exits before setup() is reached.
if sys.argv[-1] == 'publish':
    reply = None
    input_method = input
    if not sys.version_info[0] >= 3:
        input_method = raw_input  # noqa
    reply = str(input_method(
        '>>> Confirm release PUBLISH to PyPI? (yes/no): ')).lower().strip()
    if reply == 'yes':
        print("\n*** Checking code health with flake8:\n")
        os.system("python -m pip install 'flake8==3.9.2'")
        flake8_status = os.system("flake8 --exclude=temp")
        if flake8_status != 0:
            # Abort the release rather than publish unlinted code.
            print("\nWARNING! Fix flake8 issues before publishing to PyPI!\n")
            sys.exit()
        else:
            print("*** No flake8 issues detected. Continuing...")
        print("\n*** Rebuilding distribution packages: ***\n")
        os.system('rm -f dist/*.egg; rm -f dist/*.tar.gz; rm -f dist/*.whl')
        os.system('python setup.py sdist bdist_wheel')  # Create new tar/wheel
        print("\n*** Installing twine: *** (Required for PyPI uploads)\n")
        os.system("python -m pip install 'twine>=1.15.0'")
        print("\n*** Installing tqdm: *** (Required for PyPI uploads)\n")
        os.system("python -m pip install --upgrade 'tqdm>=4.62.2'")
        print("\n*** Publishing The Release to PyPI: ***\n")
        os.system('python -m twine upload dist/*')  # Requires ~/.pypirc Keys
        print("\n*** The Release was PUBLISHED SUCCESSFULLY to PyPI! :) ***\n")
    else:
        print("\n>>> The Release was NOT PUBLISHED to PyPI! <<<\n")
    sys.exit()
# Package metadata and distribution configuration for the masterqa package.
setup(
    name='masterqa',
    version='1.6.1',
    description='Automation-Assisted Manual Testing - https://masterqa.com',
    long_description=long_description,
    long_description_content_type='text/markdown',
    platforms=["Windows", "Linux", "Mac OS-X"],
    url='https://masterqa.com',
    author='Michael Mintz',
    author_email='mdmintz@gmail.com',
    maintainer='Michael Mintz',
    license='The MIT License',
    # Runtime dependencies: MasterQA is a thin layer over SeleniumBase.
    install_requires=[
        'seleniumbase>=2.4.14',
        'sbvirtualdisplay>=1.0.0',
    ],
    packages=['masterqa'],
    entry_points={
        'nose.plugins': []
    }
)
|
# -*- coding: utf-8 -*-
# This file is part of Shoop.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
from django.template.loader import get_template
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import get_language
from shoop.apps.provides import (
get_identifier_to_object_map, get_provide_objects
)
from shoop.utils.text import space_case
from shoop.xtheme.plugins.consts import FALLBACK_LANGUAGE_CODE
from shoop.xtheme.plugins.forms import GenericPluginForm
SENTINEL = object()
class Plugin(object):
    """
    A plugin that can be instantiated within a `shoop.xtheme.layout.LayoutCell`.
    Other plugins should inherit from this class and register themselves in the
    `xtheme_plugin` provide category.
    """
    identifier = None  # provide-registry identifier; must be set by subclasses
    fields = []  # editor form fields; empty means "not configurable"
    required_context_variables = set()  # names that must exist in the render context
    name = _("Plugin") # User-visible name
    editor_form_class = GenericPluginForm
    def __init__(self, config):
        """
        Instantiate a Plugin with the given `config` dictionary.
        :param config: Dictionary of freeform configuration data
        :type config: dict
        """
        self.config = config
    def is_context_valid(self, context):
        """
        Check that the given rendering context is valid for rendering this plugin.
        By default, just checks `required_context_variables`.
        :param context: Rendering context
        :type context: jinja2.runtime.Context
        :return: True if we should bother trying to render this
        :rtype: bool
        """
        # SENTINEL distinguishes "missing" from a legitimately falsy value.
        for key in self.required_context_variables:
            if context.get(key, SENTINEL) is SENTINEL:
                return False
        return True
    def render(self, context):
        """
        Return the HTML for a plugin in a given rendering context.
        :param context: Rendering context
        :type context: jinja2.runtime.Context
        :return: String of rendered content.
        :rtype: str
        """
        return "" # pragma: no cover
    def get_editor_form_class(self):
        """
        Return the form class for editing this plugin.
        The form class should either derive from PluginForm, or at least have a `get_config()` method.
        Form classes without `fields` are treated the same way as if you'd return `None`,
        i.e. no configuration form is presented to the user.
        :return: Editor form class
        :rtype: class[forms.Form]|None
        """
        # Could be overridden in suitably special subclasses.
        if self.fields:
            return self.editor_form_class
    def get_translated_value(self, key, default=None, language=None):
        """
        Get a translated value from the plugin's configuration.
        It's assumed that translated values are stored in a ``{language: data, ...}`` dictionary
        in the plugin configuration blob.
        This is the protocol that `shoop.xtheme.plugins.forms.TranslatableField` uses.
        If the configuration blob contains such a dictionary, but it does not contain
        a translated value in the requested language does not exist, the fallback value, if any,
        within that dictionary is tried next. Failing that, the ``default`` value is returned.
        :param key: Configuration key
        :type key: str
        :param default: Default value to return when all else fails.
        :param language: Requested language. Defaults to the active language.
        :type language: str|None
        :return: A translated value.
        """
        value = self.config.get(key)
        if not value:
            return default
        if isinstance(value, dict): # It's a dict, so assume it's something from TranslatableField
            language = (language or get_language())
            if language in value: # The language we requested exists, use that
                return value[language]
            if FALLBACK_LANGUAGE_CODE in value: # An untranslated fallback exists, use that
                return value[FALLBACK_LANGUAGE_CODE]
            return default # Fall back to the default, then
        return value # Return the value itself; it's probably just something untranslated.
    @classmethod
    def load(cls, identifier):
        """
        Get a plugin class based on the identifier from the `xtheme_plugin` provides registry.
        :param identifier: Plugin class identifier
        :type identifier: str
        :return: A plugin class, or None
        :rtype: class[Plugin]|None
        """
        return get_identifier_to_object_map("xtheme_plugin").get(identifier)
    @classmethod
    def get_plugin_choices(cls, empty_label=None):
        """
        Get a sorted list of 2-tuples (identifier and name) of available Xtheme plugins.
        Handy for `<select>` boxen.
        :param empty_label: Label for the "empty" choice. If falsy, no empty choice is prepended
        :type empty_label: str|None
        :return: List of 2-tuples
        :rtype: Iterable[tuple[str, str]]
        """
        choices = []
        if empty_label:
            choices.append(("", empty_label))
        for plugin in get_provide_objects("xtheme_plugin"):
            # Plugins without an identifier are abstract/incomplete; skip them.
            if plugin.identifier:
                choices.append((
                    plugin.identifier,
                    getattr(plugin, "name", None) or plugin.identifier
                ))
        # Sorted by identifier; the empty choice ("") stays first if present.
        choices.sort()
        return choices
class TemplatedPlugin(Plugin):
    # TODO: Document `TemplatedPlugin` better!
    """
    Convenience base class for plugins that just render a "sub-template" with a given context.
    """
    #: The template to render
    template_name = ""
    #: Variables to copy from the parent context.
    inherited_variables = set()
    #: Variables to copy from the plugin configuration
    config_copied_variables = set()
    engine = None # template rendering engine
    def get_context_data(self, context):
        """
        Get a context dictionary from a Jinja2 context.
        :param context: Jinja2 rendering context
        :type context: jinja2.runtime.Context
        :return: Dict of vars
        :rtype: dict[str, object]
        """
        # Later sources override earlier ones: required context vars,
        # then inherited vars, then values from the plugin configuration.
        vars = {"request": context.get("request")}
        for key in self.required_context_variables:
            vars[key] = context.get(key)
        for key in self.inherited_variables:
            vars[key] = context.get(key)
        for key in self.config_copied_variables:
            vars[key] = self.config.get(key)
        return vars
    def render(self, context): # doccov: ignore
        vars = self.get_context_data(context)
        # Prefer the plugin-specific engine if one is set; otherwise fall
        # back to Django's default template loader.
        if self.engine:
            template = self.engine.get_template(self.template_name)
        else:
            template = get_template(self.template_name)
        return template.render(vars, request=context.get("request"))
def templated_plugin_factory(identifier, template_name, **kwargs):
    """
    A factory (akin to `modelform_factory`) to quickly create simple plugins.

    :param identifier: The unique identifier for the new plugin.
    :type identifier: str
    :param template_name: The template file path this plugin should render
    :type template_name: str
    :param kwargs: Other arguments for the `TemplatedPlugin`/`Plugin` classes.
    :type kwargs: dict
    :return: New `TemplatedPlugin` subclass
    :rtype: class[TemplatedPlugin]
    """
    # Seed the class namespace, letting callers override anything via kwargs.
    namespace = dict(identifier=identifier, template_name=template_name)
    namespace.update(kwargs)
    if "name" not in namespace:
        # Derive a human-readable name from the identifier.
        namespace["name"] = space_case(identifier).title()
    return type(str("%sPlugin" % identifier), (TemplatedPlugin,), namespace)
|
"""
This module contains a single class that manages the scraping of data
from one or more supermarkets on mysupermarket.co.uk
"""
from datetime import datetime
from os import remove
from os.path import isfile, getmtime
from time import time
from scrapy import signals
from scrapy.crawler import Crawler
from scrapy.utils.project import get_project_settings
from app_config import supermarket_names, supermarket_url, supermarket_filename
from .reactor_control import ReactorControl
from .spiders.mysupermarket import MySupermarketSpider
class CachingScraper():
    """
    A "crawler manager" that manages scraping mysupermarket.co.uk for one or
    more supermarkets. For each supermarket, it checks the cache file then
    creates and starts a crawler if appropriate.
    """
    def __init__(self, supermarkets=None, force_refresh=False):
        """Create a CachingScraper for the given supermarket(s).

        Keyword arguments:
        supermarkets -- a list of supermarkets to scrape
                        (default: all supermarkets from app_config)
        force_refresh -- if True, cachefiles will not be used
        """
        # Resolve the default at call time rather than import time so a
        # changed supermarket configuration is picked up.
        if supermarkets is None:
            supermarkets = supermarket_names()
        self.force_refresh = force_refresh
        self.supermarkets = supermarkets
        # NOTE(review): this attribute appears unused -- get_data() creates
        # its own ReactorControl. Kept for backward compatibility.
        self.reactor_control = ReactorControl()

    def cache_exists(self, supermarket):
        """Check whether a JSON file already exists for data scraped from
        the given supermarket, and if so, whether it was created today.

        Note that 'created today' is not the same as 'age < 24 hours'. Prices
        are assumed to change overnight so a cachefile created at 9pm
        yesterday is considered out of date at 9am today (but a cachefile
        created at 9am is not out of date at 9pm).

        Keyword arguments:
        supermarket -- the supermarket whose cachefile should be checked
        """
        cachefile = supermarket_filename(supermarket)
        if not isfile(cachefile):
            return False
        mtime = datetime.fromtimestamp(getmtime(cachefile))
        now = datetime.fromtimestamp(time())
        return mtime.day == now.day

    def setup_crawler(self, supermarket, reactor_control):
        """Set up the Scrapy crawler.

        See http://doc.scrapy.org/en/latest/topics/practices.html#run-scrapy-from-a-script.

        Keyword arguments:
        supermarket -- the supermarket whose crawler should be set up
        reactor_control -- the ReactorControl tracking active crawlers
        """
        # Remove any stale cachefile so the feed exporter starts fresh.
        cachefile = supermarket_filename(supermarket)
        if isfile(cachefile):
            remove(cachefile)
        settings = get_project_settings()
        url = supermarket_url(supermarket)
        settings.set('FEED_URI', supermarket_filename(supermarket))
        spider = MySupermarketSpider(url)
        crawler = Crawler(settings)
        # Let the reactor controller know when this crawl finishes.
        crawler.signals.connect(reactor_control.remove_crawler, signal=signals.spider_closed)
        crawler.configure()
        crawler.crawl(spider)
        crawler.start()
        reactor_control.add_crawler()

    def get_data(self):
        """Main entry point for the scraper class. Crawl or get data from cache
        for the configured supermarkets. Supermarkets are set in __init__.
        """
        if self.force_refresh:
            supermarkets_to_crawl = self.supermarkets
        else:
            # Only crawl supermarkets whose cache is missing or stale.
            supermarkets_to_crawl = [x for x in self.supermarkets if not self.cache_exists(x)]
        if supermarkets_to_crawl:
            reactor_control = ReactorControl()
            for supermarket in supermarkets_to_crawl:
                self.setup_crawler(supermarket, reactor_control)
            reactor_control.start_crawling()
|
from __future__ import unicode_literals
import logging
import operator
import os
import sys
import urllib2
from mopidy import backend, exceptions, models
from mopidy.audio import scan, utils
from mopidy.internal import path
logger = logging.getLogger(__name__)
FS_ENCODING = sys.getfilesystemencoding()
class FileLibraryProvider(backend.LibraryProvider):
    """Library for browsing local files."""

    # TODO: get_images that can pull from metadata and/or .folder.png etc?
    # TODO: handle playlists?

    @property
    def root_directory(self):
        # With a single media dir, browse straight into it; with several,
        # expose a virtual 'file:root' directory listing all of them.
        if not self._media_dirs:
            return None
        elif len(self._media_dirs) == 1:
            uri = path.path_to_uri(self._media_dirs[0]['path'])
        else:
            uri = 'file:root'
        return models.Ref.directory(name='Files', uri=uri)

    def __init__(self, backend, config):
        super(FileLibraryProvider, self).__init__(backend)
        self._media_dirs = list(self._get_media_dirs(config))
        self._follow_symlinks = config['file']['follow_symlinks']
        self._show_dotfiles = config['file']['show_dotfiles']
        self._scanner = scan.Scanner(
            timeout=config['file']['metadata_timeout'])

    def browse(self, uri):
        """Return directory/track refs for the children of *uri*."""
        logger.debug('Browsing files at: %s', uri)
        result = []
        local_path = path.uri_to_path(uri)
        if local_path == 'root':
            return list(self._get_media_dirs_refs())
        # Refuse to browse outside the configured media dirs.
        if not self._is_in_basedir(os.path.realpath(local_path)):
            logger.warning(
                'Rejected attempt to browse path (%s) outside dirs defined '
                'in file/media_dirs config.', uri)
            return []
        for dir_entry in os.listdir(local_path):
            child_path = os.path.join(local_path, dir_entry)
            uri = path.path_to_uri(child_path)
            if not self._show_dotfiles and dir_entry.startswith(b'.'):
                continue
            if os.path.islink(child_path) and not self._follow_symlinks:
                logger.debug('Ignoring symlink: %s', uri)
                continue
            # Symlinks inside a media dir may still point outside it.
            if not self._is_in_basedir(os.path.realpath(child_path)):
                logger.debug('Ignoring symlink to outside base dir: %s', uri)
                continue
            name = dir_entry.decode(FS_ENCODING, 'replace')
            if os.path.isdir(child_path):
                result.append(models.Ref.directory(name=name, uri=uri))
            elif os.path.isfile(child_path):
                result.append(models.Ref.track(name=name, uri=uri))
        result.sort(key=operator.attrgetter('name'))
        return result

    def lookup(self, uri):
        """Return a single-element track list for *uri*, scanning metadata."""
        logger.debug('Looking up file URI: %s', uri)
        local_path = path.uri_to_path(uri)
        try:
            result = self._scanner.scan(uri)
            track = utils.convert_tags_to_track(result.tags).copy(
                uri=uri, length=result.duration)
        except exceptions.ScannerError as e:
            # Fall back to a bare track so the file is still playable.
            logger.warning('Failed looking up %s: %s', uri, e)
            track = models.Track(uri=uri)
        if not track.name:
            # No tag-derived name; use the (percent-decoded) filename.
            filename = os.path.basename(local_path)
            name = urllib2.unquote(filename).decode(FS_ENCODING, 'replace')
            track = track.copy(name=name)
        return [track]

    def _get_media_dirs(self, config):
        # Each config entry is "path" or "path|name".
        for entry in config['file']['media_dirs']:
            media_dir = {}
            media_dir_split = entry.split('|', 1)
            local_path = path.expand_path(
                media_dir_split[0].encode(FS_ENCODING))
            if not local_path:
                logger.debug(
                    'Failed expanding path (%s) from file/media_dirs config '
                    'value.',
                    media_dir_split[0])
                continue
            elif not os.path.isdir(local_path):
                logger.warning(
                    '%s is not a directory. Please create the directory or '
                    'update the file/media_dirs config value.', local_path)
                continue
            media_dir['path'] = local_path
            if len(media_dir_split) == 2:
                media_dir['name'] = media_dir_split[1]
            else:
                # TODO Mpd client should accept / in dir name
                media_dir['name'] = media_dir_split[0].replace(os.sep, '+')
            yield media_dir

    def _get_media_dirs_refs(self):
        # One directory ref per configured media dir (for 'file:root').
        for media_dir in self._media_dirs:
            yield models.Ref.directory(
                name=media_dir['name'],
                uri=path.path_to_uri(media_dir['path']))

    def _is_in_basedir(self, local_path):
        # True when *local_path* lies inside any configured media dir.
        return any(
            path.is_path_inside_base_dir(local_path, media_dir['path'])
            for media_dir in self._media_dirs)
|
from datetime import datetime
from django.contrib.auth.decorators import login_required
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from Core.models import RosterAudit, RosterUser
@login_required
@csrf_exempt
def myroster_rows(request):
    """
    Obtain all the roster rows the connected user is scheduled on.

    Returns a JSON list of audit dates (today or later) on which the
    logged-in user appears in the roster, in ascending date order.
    """
    # Email address of the logged-in user.
    email = request.user.email
    # Only today's and future roster entries are of interest.
    date_now = datetime.now().date()
    connected_username = ""
    collector = []
    # Resolve the user's display name from the roster table by email.
    # We do not rely on the username from the request, since users can
    # enter any name there, which would make matching unmanageable.
    for user in RosterUser.objects.filter(email=email):
        connected_username = user.first_name + " " + user.last_name
    for audit in RosterAudit.objects.filter(engineer=connected_username).filter(audit_date_field__gte=date_now).order_by('audit_date_field'):
        collector.append(audit.audit_date_field)
    return JsonResponse(collector, safe=False)
|
from django.db import models
class Person(models.Model):
    # Minimal model used by the djangoitem test-suite.
    name = models.CharField(max_length=255, default='Robot')
    age = models.IntegerField()

    class Meta:
        app_label = 'test_djangoitem'
class IdentifiedPerson(models.Model):
    # Variant of Person with an explicit (non-auto) primary key.
    identifier = models.PositiveIntegerField(primary_key=True)
    name = models.CharField(max_length=255)
    age = models.IntegerField()

    class Meta:
        app_label = 'test_djangoitem'
|
# anchorGenerator
from models.anchor import *
# main function
if __name__ == '__main__':
    # TEMP: Wipe existing anchors
    # anchors = Anchor.all(size=1000)
    # Anchor.delete_all(anchors)

    # THIS IS TEMPORARY: a hard-coded seed set of vaccination-related terms.
    # (A dead, corrupted Elasticsearch-aggregation snippet for harvesting
    # anchors automatically was removed here; see version control history.)
    anchors = {'Vaccination', 'Vaccinations', 'Vaccine', 'Vaccines',
               'Inoculation', 'Immunization', 'Shot', 'Chickenpox',
               'Disease', 'Diseases', 'Hepatitis A', 'Hepatitis B',
               'infection', 'infections', 'measles', 'outbreak', 'mumps',
               'rabies', 'tetanus', 'virus', 'autism'}
    # Create (or fetch) each anchor, find its instances and persist it.
    for anchor in anchors:
        a = Anchor.getOrCreate(anchor)
        a.findInstances()
        a.save()
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.test import TestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User
from powerpages.models import Page
from powerpages.sync import PageFileDumper
from powerpages.admin import website_link, sync_status, save_page
from powerpages.signals import page_edited
from .test_sync import BaseSyncTestCase
class WebsiteLinkTestCase(TestCase):
    """Tests for the ``website_link`` admin helper, which renders a page's
    URL as an HTML link with the last path segment emphasised in bold."""
    maxDiff = None
    def test_no_object(self):
        # No page at all yields no link.
        self.assertIsNone(website_link(None))
    def test_empty_url(self):
        self.assertEqual(
            website_link(Page(url='')),
            '<a href="" style="font-weight: normal;"> &raquo;</a>'
        )
    def test_root_url(self):
        # The bare root URL has no segment to emphasise.
        self.assertEqual(
            website_link(Page(url='/')),
            '<a href="/" style="font-weight: normal;">/ &raquo;</a>'
        )
    def test_first_level_url(self):
        self.assertEqual(
            website_link(Page(url='/test/')),
            '<a href="/test/" style="font-weight: normal;">'
            '/<span style="font-weight: bold">test</span>/'
            ' &raquo;</a>'
        )
    def test_second_level_url(self):
        # Only the last segment is bolded; ancestors stay plain.
        self.assertEqual(
            website_link(Page(url='/nested/test/')),
            '<a href="/nested/test/" style="font-weight: normal;">'
            '/nested/<span style="font-weight: bold">test</span>/'
            ' &raquo;</a>'
        )
    def test_file(self):
        # File-like URLs (no trailing slash) bold the filename.
        self.assertEqual(
            website_link(Page(url='/robots.txt')),
            '<a href="/robots.txt" style="font-weight: normal;">'
            '/<span style="font-weight: bold">robots.txt</span>'
            ' &raquo;</a>'
        )
    def test_nested_file(self):
        self.assertEqual(
            website_link(Page(url='/nested/robots.txt')),
            '<a href="/nested/robots.txt" style="font-weight: normal;">'
            '/nested/<span style="font-weight: bold">robots.txt</span>'
            ' &raquo;</a>'
        )
class SyncStatusTestCase(BaseSyncTestCase):
    """Tests for the ``sync_status`` admin helper, which reports whether a
    Page's on-disk dump is in sync (green), stale (orange), missing (red),
    or has unexported Admin edits (bold black prefix)."""
    maxDiff = None
    def test_no_object(self):
        self.assertIsNone(sync_status(None))
    def test_file_synced(self):
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        # Dumping right after creation leaves page and file identical.
        PageFileDumper(page).save()
        self.assertEqual(
            sync_status(page),
            '<span style="color: green">File is synced</span>'
        )
    def test_file_content_differs(self):
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        PageFileDumper(page).save()
        # Mutating the page after the dump makes the file stale.
        page.title = 'Lorem Ipsum'
        page.save()
        self.assertEqual(
            sync_status(page),
            '<span style="color: orange">File content differs</span>'
        )
    def test_file_is_missing(self):
        # No dump performed at all, so there is no file on disk.
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        self.assertEqual(
            sync_status(page),
            '<span style="color: red">File is missing</span>'
        )
    def test_file_content_differs_modified_in_admin(self):
        page = Page.objects.create(
            url='/test-page/', template='<h1>Test Page</h1>'
        )
        PageFileDumper(page).save()
        page.title = 'Lorem Ipsum'
        page.is_dirty = True  # modified in Admin
        page.save()
        self.assertEqual(
            sync_status(page),
            '<span style="color:black; font-weight:bold">'
            'Changed in Admin!</span><br>'
            '<span style="color: orange">File content differs</span>'
        )
class SavePageTestCase(TestCase):
    """Tests for the ``save_page`` admin helper: it must persist the page,
    mark it dirty, and emit the ``page_edited`` signal with the page, user
    and created flag."""
    maxDiff = None
    def setUp(self):
        # Capture the kwargs of the page_edited signal so each test can
        # assert on them; the handler closes over ``self``.
        def page_edited_test_handler(sender, **kwargs):
            self.page_edited_kwargs = kwargs
        self.page_edited_kwargs = None
        page_edited.connect(
            page_edited_test_handler, dispatch_uid='test_page_edited',
            weak=False
        )
    def tearDown(self):
        # Disconnect by dispatch_uid so other tests don't receive the signal.
        page_edited.disconnect(dispatch_uid='test_page_edited')
        self.page_edited_kwargs = None
    def test_create_page(self):
        page = Page(url='/test-page/')
        user = User.objects.create_user('admin-user')
        save_page(page=page, user=user, created=True)
        self.assertIsNotNone(page.pk)
        self.assertTrue(page.is_dirty)
        self.assertDictContainsSubset(
            {'page': page, 'user': user, 'created': True},
            self.page_edited_kwargs
        )
    def test_modify_page(self):
        page = Page.objects.create(url='/test-page/', title='Lorem')
        page.title = 'Ipsum'
        user = User.objects.create_user('admin-user')
        save_page(page=page, user=user, created=False)
        self.assertEqual(Page.objects.get(pk=page.pk).title, 'Ipsum')
        self.assertTrue(page.is_dirty)
        self.assertDictContainsSubset(
            {'page': page, 'user': user, 'created': False},
            self.page_edited_kwargs
        )
class SwitchEditModeViewTestCase(TestCase):
    """Tests for the ``switch_edit_mode`` view: staff/superusers toggle the
    session's WEBSITE_EDIT_MODE flag and are redirected back to the referrer
    (or '/'); everyone else is redirected to the login page."""
    maxDiff = None
    def setUp(self):
        self.url = reverse('switch_edit_mode')
        self.staff_member = User.objects.create_user(
            'staff_member', password='letmein123', is_staff=True
        )
        self.super_user = User.objects.create_user(
            'super_user', password='letmein123', is_superuser=True
        )
        self.regular_user = User.objects.create_user(
            'regular_user', password='letmein123'
        )
        # Pages backing the redirect targets used below.
        Page.objects.create(url='/')
        Page.objects.create(url='/test-page/')
    def test_enable_edit_mode_staff_member_referrer(self):
        self.client.login(username='staff_member', password='letmein123')
        response = self.client.get(self.url, HTTP_REFERER='/test-page/')
        self.assertTrue(self.client.session.get('WEBSITE_EDIT_MODE'))
        self.assertRedirects(response, '/test-page/')
    def test_disable_edit_mode_staff_member_no_referrer(self):
        self.client.login(username='staff_member', password='letmein123')
        # Pre-enable the flag so the view toggles it off.
        session = self.client.session
        session['WEBSITE_EDIT_MODE'] = True
        session.save()
        response = self.client.get(self.url)
        self.assertNotIn('WEBSITE_EDIT_MODE', self.client.session)
        self.assertRedirects(response, '/')
    def test_enable_edit_mode_super_user_no_referrer(self):
        self.client.login(username='super_user', password='letmein123')
        response = self.client.get(self.url)
        self.assertTrue(self.client.session.get('WEBSITE_EDIT_MODE'))
        self.assertRedirects(response, '/')
    def test_disable_edit_mode_super_user_referrer(self):
        self.client.login(username='super_user', password='letmein123')
        session = self.client.session
        session['WEBSITE_EDIT_MODE'] = True
        session.save()
        response = self.client.get(self.url, HTTP_REFERER='/test-page/')
        self.assertNotIn('WEBSITE_EDIT_MODE', self.client.session)
        self.assertRedirects(response, '/test-page/')
    def test_access_forbidden_regular_user(self):
        self.client.login(username='regular_user', password='letmein123')
        response = self.client.get(self.url)
        # fetch_redirect_response=False: the login URL itself isn't served here.
        self.assertRedirects(
            response, '{0}?next={1}'.format(settings.LOGIN_URL, self.url),
            fetch_redirect_response=False
        )
    def test_access_forbidden_anonmous(self):
        response = self.client.get(self.url)
        self.assertRedirects(
            response, '{0}?next={1}'.format(settings.LOGIN_URL, self.url),
            fetch_redirect_response=False
        )
|
# Copyright (C) 2014 - Oscar Campos <oscar.campos@member.fsf.org>
# This program is Free Software see LICENSE file for details
"""AnacondaPHP is a PHP linting plugin for Sublime Text 3
"""
|
from .plugin_version import anaconda_required_version
from .anaconda_lib.anaconda_plugin import anaconda_version
if anaconda_required_version > anaconda_vers | ion:
raise RuntimeError(
'AnacondaPHP requires version {} or better of anaconda but {} '
'is installed'.format(
'.'.join([str(i) for i in anaconda_required_version]),
'.'.join([str(i) for i in anaconda_version])
)
)
from .commands import *
from .listeners import *
|
# http://www.pythonchallenge.com/pc/def/equality.html
import re
file_ob = open("3.dat", 'r')
ob_read = file_ob.read()
read_arr = list(ob_read)
word = []
def for_loop(): # Loops through array to find solution
for i in range(len(read_arr)):
if (i + 8) > len(read_arr): # To keep index in bounds
break
if not(r | ead_arr[i]).isupper() and (read_arr[i + 1]).isupper() and (read_arr[i + 2]).isupper() and (read_arr[i + 3]).isupper() and(read_arr[i + 4]).islower() and (read_arr[i + 5]).isupper() and (read_arr[i + 6]).isupper() and (read_arr[i + 7]).isupper() and not(read_arr[i + 8]).isupper():
|
word.append(read_arr[i + 4])
print "".join(word)
def reg_ex(): # Uses regex to find the pattern
print "".join( re.findall("[^A-Z][A-Z]{3}([a-z])[A-Z]{3}[^A-Z]", ob_read))
# for_loop()
# reg_ex()
|
"""
Contains the python specific FileManager.
"""
import ast
import re
from pyqode.core.api import TextBlockHelper
from pyqode.core.managers import FileManager
class PyFileManager(FileManager):
    """
    Extends file manager to override detect_encoding. With python, we can
    detect encoding by reading the two first lines of a file and extracting its
    encoding tag.
    """
    #: True to fold import statements on open.
    fold_imports = False
    #: True to fold docstring on open
    fold_docstrings = False
    def detect_encoding(self, path):
        """
        Detect the encoding of the Python source file at ``path``.

        Returns 'utf-8' for a BOM-prefixed file, the PEP 263 coding tag from
        the first two lines if present, and 'UTF-8' otherwise.

        For the implementation of encoding definitions in Python, look at:
        - http://www.python.org/dev/peps/pep-0263/
        .. note:: code taken and adapted from
            ```jedi.common.source_to_unicode.detect_encoding```
        """
        with open(path, 'rb') as file:
            source = file.read()
        # take care of line encodings (not in jedi)
        source = source.replace(b'\r', b'')
        # NOTE(review): under Python 3, str(bytes) yields "b'...'" and the
        # newlines appear as literal '\n' escapes — which is why they are
        # un-escaped here. Looks intentional but fragile; confirm.
        source_str = str(source).replace('\\n', '\n')
        # UTF-8 BOM bytes, built via literal_eval so this line parses the
        # same under Python 2 and 3.
        byte_mark = ast.literal_eval(r"b'\xef\xbb\xbf'")
        if source.startswith(byte_mark):
            # UTF-8 byte-order mark
            return 'utf-8'
        # PEP 263 allows the coding tag only on the first two lines.
        first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source_str).group(0)
        possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
                                      first_two_lines)
        if possible_encoding:
            return possible_encoding.group(1)
        return 'UTF-8'
    def open(self, path, encoding=None, use_cached_encoding=True):
        # NOTE(review): the ``encoding`` argument is unconditionally replaced
        # by the detected encoding — confirm callers do not rely on it.
        encoding = self.detect_encoding(path)
        super(PyFileManager, self).open(
            path, encoding=encoding, use_cached_encoding=use_cached_encoding)
        try:
            folding_panel = self.editor.panels.get('FoldingPanel')
        except KeyError:
            # No folding panel installed: nothing to fold.
            pass
        else:
            # fold imports and/or docstrings
            blocks_to_fold = []
            sh = self.editor.syntax_highlighter
            if self.fold_imports and sh.import_statements:
                blocks_to_fold += sh.import_statements
            if self.fold_docstrings and sh.docstrings:
                blocks_to_fold += sh.docstrings
            for block in blocks_to_fold:
                if TextBlockHelper.is_fold_trigger(block):
                    folding_panel.toggle_fold_trigger(block)
    def clone_settings(self, original):
        # Copy the fold flags when cloning editor settings.
        super(PyFileManager, self).clone_settings(original)
        self.fold_docstrings = original.fold_docstrings
        self.fold_imports = original.fold_imports
|
from .element import Element
class Anchor(Element):
    """Implements the <a> tag."""

    def __init__(self, text=None, href="#", cl=None, ident=None, style=None, attrs=None):
        super().__init__(cl=cl, ident=ident, style=style, attrs=attrs)
        self.href = href
        if text:
            self._children.append(text)

    def __repr__(self):
        return "<Anchor(href='{}')>".format(self.href)

    def __str__(self):
        # Assemble the opening tag attribute by attribute, then append the
        # rendered children and the closing tag.
        pieces = ["<a"]
        if self.ident:
            pieces.append(' id="{}"'.format(self.ident))
        if self.cl:
            pieces.append(' class="{}"'.format(self.cl))
        pieces.append(' href="{}"'.format(self.href))
        if self.style:
            pieces.append(' style="{}"'.format(self.style))
        if self.attrs:
            for key in self.attrs.keys():
                pieces.append(' {}="{}"'.format(key, self.attrs[key]))
        pieces.append(">")
        pieces.extend(str(child) for child in self._children)
        pieces.append("</a>")
        return "".join(pieces)
|
# NFSe (Brazilian electronic service invoice) WSDL endpoints, keyed by IBGE
# municipality code (3304557 = Rio de Janeiro, "Nota Carioca").
URL = {
    3304557: {
        "production": "https://notacarioca.rio.gov.br/WSNacional/nfse.asmx?wsdl",
        "sandbox": "https://homologacao.notacarioca.rio.gov.br/WSNacional/nfse.asmx?wsdl"
    }
}
# XML request template per operation. NOTE(review): 'status' and 'get_nfse'
# intentionally share the same template — confirm.
TEMPLATES = {
    'send_rps': "GerarNfseEnvio.xml",
    'status': "ConsultarNfseEnvio.xml",
    'get_nfse': "ConsultarNfseEnvio.xml",
    'cancel': "CancelarNfseEnvio.xml"
}
from lib.common import helpers
class Module:
    """Empire module wrapper around the Get-AntiVirusProduct PowerShell
    function (queries WMI root\\SecurityCenter2 for AV product state)."""

    def __init__(self, mainMenu, params=None):
        """Store module metadata/options and apply caller-supplied params.

        :param mainMenu: Empire main-menu object (listeners, agents, paths).
        :param params: optional list of [Name, Value] pairs overriding option
            defaults. Default changed from a shared mutable ``[]`` to None.
        """
        # Metadata consumed by the Empire framework menus.
        self.info = {
            'Name': 'Get-AntiVirusProduct',
            'Author': ['@mh4x0f', 'Jan Egil Ring'],
            'Description': ('Get antivirus product information.'),
            'Background': True,
            'OutputExtension': None,
            'NeedsAdmin': False,
            'OpsecSafe': True,
            'Language': 'powershell',
            'MinLanguageVersion': '2',
            'Comments': [
                'http://blog.powershell.no/2011/06/12/use-windows-powershell-to-get-antivirus-product-information/'
            ]
        }

        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent': {
                'Description': 'Agent to run module on.',
                'Required': True,
                'Value': ''
            },
            'ComputerName': {
                'Description': 'Computername to run the module on, defaults to localhost.',
                'Required': False,
                'Value': ''
            }
        }

        # save off a copy of the mainMenu object to access external functionality
        # like listeners/agent handlers/etc.
        self.mainMenu = mainMenu

        # Apply any caller-supplied overrides; unknown names are ignored.
        for param in params or []:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value

    def generate(self, obfuscate=False, obfuscationCommand=""):
        """Build the PowerShell script, appending non-Agent options as
        arguments; optionally obfuscate via the framework helpers."""
        script = """
function Get-AntiVirusProduct {
[CmdletBinding()]
param (
[parameter(ValueFromPipeline=$true, ValueFromPipelineByPropertyName=$true)]
[Alias('name')]
$ComputerName=$env:computername )
$Query = 'select * from AntiVirusProduct'
$AntivirusProduct = Get-WmiObject -Namespace 'root\SecurityCenter2' -Query $Query @psboundparameters -ErrorVariable myError -ErrorAction 'SilentlyContinue'
switch ($AntiVirusProduct.productState) {
'262144' {$defstatus = 'Up to date' ;$rtstatus = 'Disabled'}
'262160' {$defstatus = 'Out of date' ;$rtstatus = 'Disabled'}
'266240' {$defstatus = 'Up to date' ;$rtstatus = 'Enabled'}
'266256' {$defstatus = 'Out of date' ;$rtstatus = 'Enabled'}
'393216' {$defstatus = 'Up to date' ;$rtstatus = 'Disabled'}
'393232' {$defstatus = 'Out of date' ;$rtstatus = 'Disabled'}
'393488' {$defstatus = 'Out of date' ;$rtstatus = 'Disabled'}
'397312' {$defstatus = 'Up to date' ;$rtstatus = 'Enabled'}
'397328' {$defstatus = 'Out of date' ;$rtstatus = 'Enabled'}
'397584' {$defstatus = 'Out of date' ;$rtstatus = 'Enabled'}
default {$defstatus = 'Unknown' ;$rtstatus = 'Unknown'}
}
$ht = @{}
$ht.Computername = $ComputerName
$ht.Name = $AntiVirusProduct.displayName
$ht.ProductExecutable = $AntiVirusProduct.pathToSignedProductExe
$ht.'Definition Status' = $defstatus
$ht.'Real-time Protection Status' = $rtstatus
New-Object -TypeName PSObject -Property $ht
}
Get-AntiVirusProduct """
        # .items() instead of the Python-2-only .iteritems() — works on both
        # interpreters, matching the framework's py2/py3 migration.
        for option, values in self.options.items():
            if option.lower() != "agent":
                if values['Value'] and values['Value'] != '':
                    if values['Value'].lower() == "true":
                        # if we're just adding a switch
                        script += " -" + str(option)
                    else:
                        script += " -" + str(option) + " " + str(values['Value'])
        script += ' | Out-String | %{$_ + \"`n\"};"`n'+str(self.info["Name"])+' completed!";'
        if obfuscate:
            script = helpers.obfuscate(self.mainMenu.installPath, psScript=script, obfuscationCommand=obfuscationCommand)
        return script
|
from setuptools import setup, find_packages
# Packaging metadata for the FreeCite REST API wrapper.
setup(
    name = "FreeCite",
    version = "0.1",
    py_modules = ['freecite'],
    #install requirements
    install_requires = [
        'requests==1.1.0'
    ],
    #author details
    author = "James Ravenscroft",
    author_email = "ravenscroftj@gmail.com",
    description = "A wrapper around the FreeCite REST API",
    # Fixed typo in the homepage URL ("wwww" -> "www").
    url = "http://www.github.com/ravenscroftj/freecite"
)
|
# -*- coding: utf-8 -*-
__author__ = 'degibenz'
import logging
log = logging.getLogger(__name__)
log.setLevel(logging.DEBUG)
import json
from aiohttp import web
from core.model import ObjectId
from core.exceptions import *
from models.chat import *
from models.client import Client, Token
# Public API of this module.
__all__ = [
    'ChatWS'
]
# NOTE(review): DEBUG is not read anywhere in this module — confirm it is
# consumed elsewhere before removing.
DEBUG = True
class ChatWS(web.View):
    """aiohttp WebSocket view serving a single chat room.

    The client authenticates via a token in the AUTHORIZATION header; its
    socket is registered in ``request.app['websockets']`` and incoming JSON
    messages are persisted and relayed to the room's other participants.
    """
    # Per-connection state, populated by __init__() and get().
    ws = None
    response = None
    chat = None
    client = None
    chat_pk = None
    client_pk = None
    db = None
    client_in_chat = None
    def __init__(self, request):
        # The app may not carry a 'db' key (e.g. in tests); models then fall
        # back to their own default connection.
        try:
            self.db = request.app['db']
        except(KeyError,):
            pass
        self.chat_pk = request.match_info.get('id')
        super(ChatWS, self).__init__(request)
    async def check_receiver(self, receiver: ObjectId):
        """
        Check that the receiver exists and is in this chat with the sender.

        :param receiver: identifier of the receiving client
        """
        client = Client(
            pk=ObjectId(receiver)
        )
        if self.db:
            client.db = self.db
        await client.get()
        q = {
            'chat': self.chat_pk,
            'client': ObjectId(receiver)
        }
        if not await self.client_in_chat.get(**q):
            # NOTE(review): every other save() in this class is awaited —
            # confirm whether an ``await`` is missing here.
            self.client_in_chat.save(**q)
    async def prepare_msg(self):
        # Consume messages from this client's socket until it closes.
        async for msg in self.socket:
            content = json.loads(msg.data)
            receiver = content.get('receiver', None)
            if receiver:
                await self.check_receiver(receiver)
                receiver = ObjectId(receiver)
            msg_obj = MessagesFromClientInChat(
                chat=self.chat_pk,
                client=self.client_pk,
                msg=content.get('msg'),
                receiver_message=receiver
            )
            if self.db:
                msg_obj.db = self.db
            await msg_obj.save()
            # Fan the stored message out to every socket in this chat room.
            for item in self.agents:
                await self.notify(
                    sender=item.get('client_uid'),
                    message=msg_obj.message_content,
                    socket=item.get('socket'),
                    receiver=receiver,
                )
    async def check_client(self):
        # Resolve the AUTHORIZATION header to a known client, or raise.
        token_in_header = self.request.__dict__.get('headers').get('AUTHORIZATION', None)
        if not token_in_header:
            raise TokeInHeadersNotFound
        else:
            token = Token()
            token.token = token_in_header
            if self.db:
                token.db = self.db
            self.client = await token.find_client_by_key()
            if not self.client:
                raise TokenIsNotFound
            self.client_pk = ObjectId(self.client.get('client'))
    async def notify(self, sender: ObjectId, message: str, socket: web.WebSocketResponse, receiver: ObjectId = None, ):
        """
        Send a message to every participant, or to a selected user, in the chat.

        :param sender: identifier of the sending client
        :param socket: target WebSocket connection
        :param message: text of the message
        :param receiver: identifier of the user who should receive this message
        """
        async def _notify():
            try:
                if not socket.closed:
                    # NOTE(review): in current aiohttp, send_str is a
                    # coroutine — confirm whether this needs ``await``.
                    socket.send_str(
                        data="{}".format(message)
                    )
            except(Exception,) as error:
                # Log delivery failures instead of tearing down the room.
                error_info = {
                    'action': 'notify',
                    'receiver': receiver,
                    'sender': sender,
                    'error': '{}'.format(error)
                }
                log.error(error_info)
        if receiver:
            # Prefix directed messages with an @-mention of the receiver.
            message = "@{}: {}".format(receiver, message)
        await _notify()
    async def mark_client_as_offline(self):
        # Flag this client as offline in the chat-membership collection.
        q = {
            'chat': self.chat_pk,
            'client': self.client_pk
        }
        await self.client_in_chat.objects.update(
            q,
            {'$set':
                {'online': False}
            },
            upsert=False
        )
    @property
    def socket(self):
        # This client's own WebSocket within the current chat room
        # (None if not registered yet).
        for item in self.agents:
            if item.get('client_uid') == self.client_pk:
                return item.get('socket')
    @property
    def agents(self):
        # All registered sockets belonging to this chat room.
        result = []
        for ws in self.request.app['websockets']:
            if ws.get('chat_uid') == self.chat_pk:
                result.append(ws)
        return result
    async def get(self):
        # Full connection lifecycle: upgrade to WebSocket, validate chat and
        # client, register the socket, announce the join, then relay messages
        # until the socket closes. On any failure the error is logged and the
        # socket is closed; the WebSocketResponse is always returned.
        try:
            self.ws = web.WebSocketResponse()
            await self.ws.prepare(self.request)
            self.chat_pk = ObjectId(self.chat_pk)
            chat = Chat(
                pk=self.chat_pk
            )
            if self.db:
                chat.db = self.db
            self.chat = await chat.get()
            await self.check_client()
            self.client_in_chat = ClientsInChatRoom(
                chat=self.chat_pk,
                client=self.client_pk,
            )
            if self.db:
                self.client_in_chat.db = self.db
            await self.client_in_chat.add_person_to_chat()
            self.request.app['websockets'].append({
                "socket": self.ws,
                "client_uid": self.client_pk,
                'chat_uid': self.chat_pk
            })
            for _ws in self.agents:
                _ws.get('socket').send_str('%s joined' % self.client_pk)
            await self.prepare_msg()
        except(Exception,) as error:
            self.response = {
                'status': False,
                'error': "{}".format(error)
            }
            log.error(self.response)
            await self.ws.close()
        finally:
            return self.ws
|
from .attribute import html_attribute
from .element import VoidElement
class Image(VoidElement):
    """An HTML image (<img>) element.
    Images must have an alternate text description that describes the
    contents of the image, if the image can not be displayed. In some
    cases the alternate text can be empty. For example, if the image just
    displays a company logo next to the company's name or if the image just
    adds an icon next to a textual description of an action.
    Example:
    >>> image = Image("whiteboard.jpg",
    ...     "A whiteboard filled with mathematical formulas.")
    >>> image.title = "Whiteboards are a useful tool"
    """
    def __init__(self, url, alternate_text=""):
        super().__init__("img")
        # These assignments go through the html_attribute descriptors
        # declared below, setting the "src" and "alt" HTML attributes.
        self.url = url
        self.alternate_text = alternate_text
    # Descriptors mapping Python attributes onto HTML attributes.
    url = html_attribute("src")
    alternate_text = html_attribute("alt")
    title = html_attribute("title")
|
from mumax2 import *
# Standard Problem 4
# Sanity-check script: a user-defined maxabs/maxnorm quantity over the torque
# must agree with the solver's built-in "maxtorque" quantity.
Nx = 32
Ny = 32
Nz = 1
setgridsize(Nx, Ny, Nz)
setcellsize(500e-9/Nx, 125e-9/Ny, 3e-9/Nz)
load('micromagnetism')
load('solver/rk12')
setv('Msat', 800e3)
setv('demag_acc', 7)
setv('Aex', 1.3e-11)
setv('alpha', 1)
setv('dt', 1e-12)
setv('m_maxerror', 1./1000)
# Register custom reduction quantities over the torque field.
new_maxabs("my_maxtorque", "torque")
new_maxnorm("maxnorm", "torque")
# Uniform initial magnetisation along (1, 1, 0).
m=[ [[[1]]], [[[1]]], [[[0]]] ]
setarray('m', m)
t1=getv("maxtorque")
t2=getv("my_maxtorque")
t3=getv("maxnorm")
echo("maxtorque:" + str(t1) + " my_maxtorque:" + str(t2) + " maxnorm:" + str(t3))
# ``crash`` is an undefined name: evaluating it raises NameError on purpose,
# aborting the script when a consistency check fails.
if t3 != t1:
    crash
if t3 < t2:
    crash
# A component-wise reduction must also be accepted.
new_maxabs("maxtorquez", "torque.z")
getv("maxtorquez")
printstats()
savegraph("graph.png")
|
"""
Classes used for defining and running nose test suites
"""
import os
from paver.easy import call_task
from pavelib.utils.test import utils as test_utils
from pavelib.utils.test.suites import TestSuite
from pavelib.utils.envs import Env
__test__ = False # do not collect
class NoseTestSuite(TestSuite):
    """
    A subclass of TestSuite with extra methods that are specific
    to nose tests
    """
    def __init__(self, *args, **kwargs):
        super(NoseTestSuite, self).__init__(*args, **kwargs)
        self.failed_only = kwargs.get('failed_only', False)
        self.fail_fast = kwargs.get('fail_fast', False)
        self.run_under_coverage = kwargs.get('with_coverage', True)
        # Per-suite report and test-id locations under the suite root.
        self.report_dir = Env.REPORT_DIR / self.root
        self.test_id_dir = Env.TEST_DIR / self.root
        self.test_ids = self.test_id_dir / 'noseids'

    def __enter__(self):
        super(NoseTestSuite, self).__enter__()
        # Ensure the output directories exist before the run starts.
        self.report_dir.makedirs_p()
        self.test_id_dir.makedirs_p()

    def __exit__(self, exc_type, exc_value, traceback):
        """
        Cleans mongo after the tests run.
        """
        super(NoseTestSuite, self).__exit__(exc_type, exc_value, traceback)
        test_utils.clean_mongo()

    def _under_coverage_cmd(self, cmd):
        """
        If self.run_under_coverage is True, it returns the arg 'cmd'
        altered to be run under coverage. It returns the command
        unaltered otherwise.
        """
        if self.run_under_coverage:
            cmd0, cmd_rest = cmd.split(" ", 1)
            # We use "python -m coverage" so that the proper python
            # will run the importable coverage rather than the
            # coverage that OS path finds.
            if not cmd0.endswith('.py'):
                cmd0 = "`which {}`".format(cmd0)
            cmd = (
                "python -m coverage run --rcfile={root}/.coveragerc "
                "{cmd0} {cmd_rest}".format(
                    root=self.root,
                    cmd0=cmd0,
                    cmd_rest=cmd_rest,
                )
            )
        return cmd

    @property
    def test_options_flags(self):
        """
        Takes the test options and returns the appropriate flags
        for the command.
        """
        opts = " "
        # Handle "--failed" as a special case: we want to re-run only
        # the tests that failed within our Django apps
        # This sets the --failed flag for the nosetests command, so this
        # functionality is the same as described in the nose documentation
        if self.failed_only:
            opts += "--failed"
        # This makes it so we use nose's fail-fast feature in two cases.
        # Case 1: --fail_fast is passed as an arg in the paver command
        # Case 2: The environment variable TESTS_FAIL_FAST is set as True
        # (Previously the value lookup read 'TEST_FAIL_FAST' while the
        # membership check tested 'TESTS_FAIL_FAST', so the toggle could
        # never work and raised KeyError when the env var was set.)
        env_fail_fast_set = (
            'TESTS_FAIL_FAST' in os.environ and os.environ['TESTS_FAIL_FAST']
        )
        if self.fail_fast or env_fail_fast_set:
            opts += " --stop"
        return opts
class SystemTestSuite(NoseTestSuite):
    """
    TestSuite for lms and cms nosetests
    """
    def __init__(self, *args, **kwargs):
        super(SystemTestSuite, self).__init__(*args, **kwargs)
        # Fall back to a default id covering the system's djangoapps when the
        # caller does not narrow the run.
        self.test_id = kwargs.get('test_id', self._default_test_id)
        self.fasttest = kwargs.get('fasttest', False)

    # The previous no-op __enter__ override (which only delegated to super)
    # has been removed; the inherited NoseTestSuite.__enter__ is used.

    @property
    def cmd(self):
        """Shell command that runs this system's Django test suite."""
        cmd = (
            './manage.py {system} test --verbosity={verbosity} '
            '{test_id} {test_opts} --traceback --settings=test'.format(
                system=self.root,
                verbosity=self.verbosity,
                test_id=self.test_id,
                test_opts=self.test_options_flags,
            )
        )
        return self._under_coverage_cmd(cmd)

    @property
    def _default_test_id(self):
        """
        If no test id is provided, we need to limit the test runner
        to the Djangoapps we want to test. Otherwise, it will
        run tests on all installed packages. We do this by
        using a default test id.
        """
        # We need to use $DIR/*, rather than just $DIR so that
        # django-nose will import them early in the test process,
        # thereby making sure that we load any django models that are
        # only defined in test files.
        default_test_id = "{system}/djangoapps/* common/djangoapps/*".format(
            system=self.root
        )
        if self.root in ('lms', 'cms'):
            default_test_id += " {system}/lib/*".format(system=self.root)
        if self.root == 'lms':
            default_test_id += " {system}/tests.py".format(system=self.root)
        return default_test_id
class LibTestSuite(NoseTestSuite):
    """
    TestSuite for edx-platform/common/lib nosetests
    """
    def __init__(self, *args, **kwargs):
        super(LibTestSuite, self).__init__(*args, **kwargs)
        # Default to running everything under the suite root.
        self.test_id = kwargs.get('test_id', self.root)
        self.xunit_report = self.report_dir / "nosetests.xml"
    @property
    def cmd(self):
        # nosetests invocation with id-file bookkeeping (for --failed reruns)
        # and an xunit XML report, optionally wrapped by coverage.
        cmd = (
            "nosetests --id-file={test_ids} {test_id} {test_opts} "
            "--with-xunit --xunit-file={xunit_report} "
            "--verbosity={verbosity}".format(
                test_ids=self.test_ids,
                test_id=self.test_id,
                test_opts=self.test_options_flags,
                xunit_report=self.xunit_report,
                verbosity=self.verbosity,
            )
        )
        return self._under_coverage_cmd(cmd)
|
from o | sweb.projects.ManageProject import ManageProject
from osweb.projects.projects_d | ata import ProjectsData |
#!/usr/bin/env python
# DummyMP - Multiprocessing Library for Dummies!
# Copyright 2014 Albert Huang.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# DummyMP Library - Logging Redirect Handler
# multiprocessing library for dummies!
# (library for easily running functions in parallel)
#
import logging
import config
import os
class DummyMPLogHandler(logging.Handler):
    """DummyMP logging handler to allow multiprocess logging.
    This class is a custom logging handler to allow spawned processes
    (from :py:mod:`multiprocessing`) to log without any issues. This
    works by intercepting emitted log records, and sending them via
    queue to the master process. The master process will process each
    record and call :py:meth:`logging.Logger.handle` to emit the
    logging record at the master process level.
    Note that this class can be used as a general multiprocess logging
    handler simply by removing the int_pid attribute.
    Attributes:
        queue (:py:class:`multiprocessing.Queue`): The Queue object to
            forward logging records to.
        int_pid (int): The internal PID used to reference the process.
    """
    def __init__(self, int_pid, queue):
        """Initializes DummyMPLogHandler with the inputted internal PID
        and Queue object."""
        logging.Handler.__init__(self)
        self.queue = queue
        self.int_pid = int_pid

    def emit(self, record):
        """Method override to forward logging records to the internal
        Queue object."""
        try:
            # Format: [ [queueMsgID, PID, internal PID], record ]
            self.queue.put([[config.DUMMYMP_LOG_ID, os.getpid(), self.int_pid], record])
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # still propagate; everything else goes to logging's error hook.
            self.handleError(record)
|
name0_1_1_0_0_2_0 = N | one
name0_1_1_0_0_2_1 = None
name0_1_1_0_0_2_2 = None
name0_1_1_0_0_2_3 = None
|
name0_1_1_0_0_2_4 = None |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.