prefix stringlengths 0 918k | middle stringlengths 0 812k | suffix stringlengths 0 962k |
|---|---|---|
# -*- mode: python; coding: utf-8; -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class KeyValueSerializer(object):
def __init__(self, event, inline=()):
s | elf.event = event
self.inline = inline
def format_string(self):
return ' '.join(
'{}=%s'.format(k)
if k not in self.inline
else '{}="{}"'.format(k, str(v).encode('unicode_escape').decode())
for k, v in self.event.items()
)
def arguments(self):
ret | urn [v for k, v in self.event.items(omit=self.inline)]
|
import shopify
from test.test_help | er import TestCase
class EventTest(TestCase):
def test_prefix_uses_resource(self):
prefix = shopify.Event._prefix(options={"resource": "orders", "resource_id": 42})
self.assertEqual("https://this-is-my-test-show.myshopify.com/admin/api/unstable/orders/42", prefix)
def test_prefix_doesnt_need_resource(self):
prefix = shopify.Event._prefix()
self.assertEqual("https://this-is-my-test-show.myshopify.com/admin/api/unstable", pref | ix)
|
# Fuck you Disyer. Stealing my fucking paypal. GET FUCKED: toontown.chat.TTSCWhiteListTerminal
from otp.speedchat.SCTerminal import SCTerminal
from otp.otpbase.OTPLocalizer import SpeedChatStaticText
SCStaticTextMsgEvent = 'SCStaticTextMsg'
class TTSCWhiteListTerminal(SCTerminal):
def __init__(self, textId, parentMenu = None):
SCTerminal.__init__(self)
self.parentClass = parentMenu
sel | f.textId = textId
self.text = SpeedChatStaticText[self.textId]
print 'SpeedText %s %s' % (self.textId, self.text)
def handleSelect(self):
SCTerminal.handleSelect(self)
if not self.parentClass.whisperAvatarId:
base.localAvatar.chatMgr.fsm.request('whiteListOpenChat')
else:
base.localAvatar.chatMgr.fsm.request('whiteListAvatarChat', [self.parentClass.whisperAva | tarId]) |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOS | T IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENT | ATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_rebel_brigadier_general_sullustan_male.iff"
result.attribute_template_id = 9
result.stfName("npc_name","sullustan_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result |
"""
9012 : 괄호
URL : https: | //www.acmicpc.net/problem/9012
Input :
6
(())())
(((()())()
(()())((()))
((()()(()))(((())))()
()()()()(()()())()
(()((())()(
| Output :
NO
NO
YES
NO
YES
NO
"""
N = int(input())
for _ in range(N):
ps = input()
if ps[-1] is '(':
print("NO")
else:
count = 0
for c in ps:
if c is '(':
count += 1
elif c is ')':
count -= 1
if count < 0:
break
if count is 0:
print("YES")
else:
print("NO")
|
import configparser
parser = c | onfigparser.SafeConfigParser()
parser.add_section('bug_tracker')
parser.set('bug_tracker', 'url', 'http://localhost:8080/bugs')
parser.set('bug_tracker', 'username', 'dhellmann')
parser.set('bug_tracker', 'password', 'secret')
for section in parser.sections():
print(section)
for name, value in parser.items(section):
| print(' {} = {!r}'.format(name, value))
|
import unittest
from .. import views
class TestViews(unittest.TestCase):
def setUp(s | elf):
pass
def test_nothing(self): |
views
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sys
from intelmq.lib import utils
from intelmq.lib.bot import Bot
from intelmq.lib.message import Event
class MalwareGroupIPsParserBot(Bot):
def process(self):
report = self.receive_message()
if not report:
self.acknowledge_message()
return
if not report.contains("raw"):
self.acknowledge_message()
raw_report = utils.base64_decode(report.value("raw"))
raw_report = raw_report.split("<tbody>")[1]
raw_report = raw_report.split("</tbody>")[0]
raw_report_splitted = raw_report.split("<tr>")
for row in raw_report_splitted:
row = row.strip()
if row == "":
continue
row_splitted = row.split("<td>")
ip = row_splitted[1].split('">')[1].split("<")[0].strip()
time_source = row_splitted[6].replace("</td></tr>", "").strip()
time_source = time_source + | " 00:00:00 UTC"
event = Event(report)
event.add('time.source', time_source, sanitize=True)
event.add('classification.type', u'malware')
event.add('source.ip', ip, sanitize=True)
event.add('raw | ', row, sanitize=True)
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = MalwareGroupIPsParserBot(sys.argv[1])
bot.start()
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import time
from openerp.report import report_sxw
from openerp import pooler
class doctor_disability(report_sxw.rml_parse):
def __init__(self, cr, uid, name, context):
super(doctor_disability, self).__init__(cr, uid, name, context=context)
self.localcontext.update({
'time': time,
'select_type': self.select_type,
'select_age': self.select_age,
'select_diseases': self.select_diseases,
'select_diseases_type': self.select_diseases_type,
'return_street_home': self.return_street_home,
'return_number_phone': self.return_number_phone,
'return_sex': self.return_sex
})
def return_street_home(self, country, state, city):
street = ""
if country:
street += country.title() + " - "
if state:
street += state.title() + " - "
if city:
street += city.title() + " - "
return street[:len(street) -2]
def return_number_phone(self, phone, mobile):
return_phone = ""
if phone:
return_phone += phone + " - "
if mobile:
return_phone += mobile + " - "
return return_phone[:len(return_phone)-2]
def return_sex(self, sex):
if sex == 'm':
return "Masculino"
return "Femenino"
def select_type(self, tipo_usuario):
if tipo_usuario:
tipo = self.pool.get('doctor.tipousuario.regimen').browse(self.cr, self.uid, tipo_usuario).name
else:
tipo= None
return tipo
def select_age(self, age):
context = {}
context.update({'lang' : self.pool.get('res.users').browse(self.cr, self.uid, self.uid, context=context).lang})
attentions = self.pool.get('doctor.attentions')
age_unit = dict(attentions.fields_get(self.cr, self.uid, 'age_unit',context=context).get('age_unit').get('selection')).get(
str(age))
return age_unit
def select_diseases(self, status):
if status== 'presumptive':
return "Impresión Diagnóstica"
if st | atus== 'confirm':
return "Confirmado"
if status== 'recurrent':
return "Recurrente"
return ""
def select_diseases_type(self, diseases_type):
if diseases_type== 'main':
return "Principal"
if diseases_type== 'related':
return "Relacionado"
return ""
report_sxw.report_sxw('report.doctor_disability_half', 'doctor.attentions',
'addons/l10n_co_doctor/report/doctor_disability_half.rml',
parser=doctor_disability, header=False)
| |
port database_exists, create_database
from datatables import ColumnDT, DataTables
MS_WD = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONFIG_FILE = os.path.join(MS_WD, "api_config.ini")
if os.path.join(MS_WD, 'libs') not in sys.path:
sys.path.append(os.path.join(MS_WD, 'libs'))
import common
Base = declarative_base()
Session = sessionmaker()
class Task(Base):
__tablename__ = "Tasks"
task_id = Column(Integer, primary_key=True)
task_status = Column(String)
sample_id = Column(String, unique=False)
timestamp = Column(DateTime)
def __repr__(self):
return '<Task("{0}","{1}","{2}","{3}")>'.format(
self.task_id, self.task_status, self.sample_id, self.timestamp
)
def to_dict(self):
return {attr.name: getattr(self, attr.name) for attr in self.__table__.columns}
def to_json(self):
return json.dumps(self.to_dict())
class Database(object):
'''
This class enables CRUD operations with the database that holds the task definitions.
Note that in the configuration file, the database type (parameter: db_type) needs to be
a SQLAlchemy dialect: sqlite, mysql, postgresql, oracle, or mssql. The driver can optionally be
specified as well, i.e., 'postgresql+psycopg2' (see http://docs.sqlalchemy.org/en/latest/core/engines.html).
'''
DEFAULTCONF = {
'db_type': 'sqlite',
'host_string': 'localhost',
'db_name': 'task_db',
'username': 'multiscanner',
'password': 'CHANGEME'
}
def __init__(self, config=None, configfile=CONFIG_FILE, regenconfig=False):
self.db_connection_string = None
self.db_engine = None
# Configuration parsing
config_parser = configparser.SafeConfigParser()
config_parser.optionxform = str
# (re)generate conf file if necessary
if regenconfig or not os.path.isfile(configfile):
self._rewrite_config(config_parser, configfile, config)
# now read in and parse the conf file
config_parser.read(configfile)
# If we didn't regen the config file in the above check, it's possible
# that the file is missing our DB settings...
if not config_parser.has_section(self.__class__.__name__):
self._rewrite_config(config_parser, configfile, config)
config_parser.read(configfile)
# If configuration was specified, use what was stored in the config file
# as a base and then override specific settings as contained in the user's
# config. This allows the user to specify ONLY the config settings they want to
# override
config_from_file = dict(config_parser.items(self.__class__.__name__))
if config:
for key_ in config:
config_from_file[key_] = config[key_]
self.config = config_from_file
def _rewrite_config(self, config_parser, configfile, usr_override_config):
"""
Regenerates the Data | base-specific part of the API config file
"""
if os.path.isfile(configfile):
# Read in the old config
config_parser.read(configfile)
if | not config_parser.has_section(self.__class__.__name__):
config_parser.add_section(self.__class__.__name__)
if not usr_override_config:
usr_override_config = self.DEFAULTCONF
# Update config
for key_ in usr_override_config:
config_parser.set(self.__class__.__name__, key_, str(usr_override_config[key_]))
with codecs.open(configfile, 'w', 'utf-8') as conffile:
config_parser.write(conffile)
def init_db(self):
"""
Initializes the database connection based on the configuration parameters
"""
db_type = self.config['db_type']
db_name = self.config['db_name']
if db_type == 'sqlite':
# we can ignore host, username, password, etc
sql_lite_db_path = os.path.join(MS_WD, db_name)
self.db_connection_string = 'sqlite:///{}'.format(sql_lite_db_path)
else:
username = self.config['username']
password = self.config['password']
host_string = self.config['host_string']
self.db_connection_string = '{}://{}:{}@{}/{}'.format(db_type, username, password, host_string, db_name)
self.db_engine = create_engine(self.db_connection_string)
# If db not present AND type is not SQLite, create the DB
if not self.config['db_type'] == 'sqlite':
if not database_exists(self.db_engine.url):
create_database(self.db_engine.url)
Base.metadata.bind = self.db_engine
Base.metadata.create_all()
# Bind the global Session to our DB engine
global Session
Session.configure(bind=self.db_engine)
@contextmanager
def db_session_scope(self):
"""
Taken from http://docs.sqlalchemy.org/en/latest/orm/session_basics.html.
Provides a transactional scope around a series of operations.
"""
ses = Session()
try:
yield ses
ses.commit()
except:
ses.rollback()
raise
finally:
ses.close()
def add_task(self, task_id=None, task_status='Pending', sample_id=None, timestamp=None):
with self.db_session_scope() as ses:
task = Task(
task_id=task_id,
task_status=task_status,
sample_id=sample_id,
timestamp=timestamp,
)
try:
ses.add(task)
# Need to explicitly commit here in order to update the ID in the DAO
ses.commit()
except IntegrityError as e:
print('PRIMARY KEY must be unique! %s' % e)
return -1
created_task_id = task.task_id
return created_task_id
def update_task(self, task_id, task_status, timestamp=None):
with self.db_session_scope() as ses:
task = ses.query(Task).get(task_id)
if task:
task.task_status = task_status
if timestamp:
task.timestamp = datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')
return task.to_dict()
def get_task(self, task_id):
with self.db_session_scope() as ses:
task = ses.query(Task).get(task_id)
if task:
# unbind Task from Session
ses.expunge(task)
return task
def get_all_tasks(self):
with self.db_session_scope() as ses:
rs = ses.query(Task).all()
# TODO: For testing, do not use in production
task_list = []
for task in rs:
ses.expunge(task)
task_list.append(task.to_dict())
return task_list
def search(self, params, id_list=None, search_by_value=False, return_all=False):
'''Search according to Datatables-supplied parameters.
Returns results in format expected by Datatables.
'''
with self.db_session_scope() as ses:
fields = [Task.task_id, Task.sample_id, Task.task_status, Task.timestamp]
columns = [ColumnDT(f) for f in fields]
if return_all:
# History page
if id_list is None:
# Return all tasks
query = ses.query(*fields)
else:
# Query all tasks for samples with given IDs
query = ses.query(*fields).filter(Task.sample_id.in_(id_list))
else:
# Analyses page
task_alias = aliased(Task)
sample_subq = (ses.query(task_alias.sample_id,
func.max(task_alias.timestamp).label('ts_max'))
|
/neighbor (list)
YANG Description: This list defines ISIS extended reachability neighbor
attributes.
"""
return self.__neighbor
def _set_neighbor(self, v, load=False):
"""
Setter method for neighbor, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlv | s/tlv/isis_neighbor_attribute/neighbors/neighbor (lis | t)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbor() directly.
YANG Description: This list defines ISIS extended reachability neighbor
attributes.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
False,
neighbor.neighbor,
yang_name="neighbor",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="neighbor",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
except (TypeError, ValueError):
raise ValueError(
{
"error-string": """neighbor must be of a type compatible with list""",
"defined-type": "list",
"generated-type": """YANGDynClass(base=YANGListType(False,neighbor.neighbor, yang_name="neighbor", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='False', extensions=None), is_container='list', yang_name="neighbor", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='list', is_config=False)""",
}
)
self.__neighbor = t
if hasattr(self, "_set"):
self._set()
def _unset_neighbor(self):
self.__neighbor = YANGDynClass(
base=YANGListType(
False,
neighbor.neighbor,
yang_name="neighbor",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="neighbor",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
neighbor = __builtin__.property(_get_neighbor)
_pyangbind_elements = OrderedDict([("neighbor", neighbor)])
from . import neighbor
class neighbors(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/isis/levels/level/link-state-database/lsp/tlvs/tlv/isis-neighbor-attribute/neighbors. Each member element of
the container is represented as a class variable - with a specific
YANG type.
YANG Description: This container describes IS neighbors.
"""
__slots__ = ("_path_helper", "_extmethods", "__neighbor")
_yang_name = "neighbors"
_pybind_generated_by = "container"
def __init__(self, *args, **kwargs):
self._path_helper = False
self._extmethods = False
self.__neighbor = YANGDynClass(
base=YANGListType(
False,
neighbor.neighbor,
yang_name="neighbor",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_container="list",
yang_name="neighbor",
parent=self,
path_helper=self._path_helper,
extmethods=self._extmethods,
register_paths=True,
extensions=None,
namespace="http://openconfig.net/yang/network-instance",
defining_module="openconfig-network-instance",
yang_type="list",
is_config=False,
)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path() + [self._yang_name]
else:
return [
"network-instances",
"network-instance",
"protocols",
"protocol",
"isis",
"levels",
"level",
"link-state-database",
"lsp",
"tlvs",
"tlv",
"isis-neighbor-attribute",
"neighbors",
]
def _get_neighbor(self):
"""
Getter method for neighbor, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor (list)
YANG Description: This list defines ISIS extended reachability neighbor
attributes.
"""
return self.__neighbor
def _set_neighbor(self, v, load=False):
"""
Setter method for neighbor, mapped from YANG variable /network_instances/network_instance/protocols/protocol/isis/levels/level/link_state_database/lsp/tlvs/tlv/isis_neighbor_attribute/neighbors/neighbor (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_neighbor is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_neighbor() directly.
YANG Description: This list defines ISIS extended reachability neighbor
attributes.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(
v,
base=YANGListType(
False,
neighbor.neighbor,
yang_name="neighbor",
parent=self,
is_container="list",
user_ordered=False,
path_helper=self._path_helper,
yang_keys="False",
extensions=None,
),
is_containe |
"""
Support for RESTful binary sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.rest/
"""
import logging
from homeassistant.components.binary_sensor import BinarySensorDevice
from homeassistant.components.sensor.rest import RestData
from homeassistant.const import CONF_VALUE_TEMPLATE
from homeassistant.helpers import template
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = 'REST Binary Sensor'
DEFAULT_METHOD = 'GET'
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Setup REST binary sensors."""
resource = config.get('resource', None)
method = config.get('method', DEFAULT_METHOD)
payload = config.get('payload', None)
ve | rify_ssl = config.get('verify_ssl', True)
rest = RestData(method, resource, payload, verify_ssl)
rest.update()
if rest.data is None:
_LOGGER.error('Unable to fetch Rest data')
return False
add_devices([RestBinarySensor(
hass, rest, config.get('name', DEFAULT_NAME),
config.get(CONF_VALUE_TEMPLATE))])
# pylint: disable=too-many-arguments
class RestBinarySensor(BinarySensorDevice):
"""A REST | binary sensor."""
def __init__(self, hass, rest, name, value_template):
"""Initialize a REST binary sensor."""
self._hass = hass
self.rest = rest
self._name = name
self._state = False
self._value_template = value_template
self.update()
@property
def name(self):
"""Return the name of the binary sensor."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
if self.rest.data is None:
return False
if self._value_template is not None:
self.rest.data = template.render_with_possible_json_value(
self._hass, self._value_template, self.rest.data, False)
return bool(int(self.rest.data))
def update(self):
"""Get the latest data from REST API and updates the state."""
self.rest.update()
|
# Copyright 2013 Allen Institute
# This file is part of dipde
# dipde is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# dipde is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with dipde. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
import dipde
import io
import os
import sys
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
#long_description = read('README.md')
def prepend_find_packages(*roots):
''' Recursively traverse nested packages under the root directories
'''
packages = []
for root in roots:
packages += [root]
packages += [root + '.' + s for s in find_packages(root)]
return packages
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--junitxml=result.xml']
self.test_args_cov = self.test_args + ['--cov=dipde', '--cov-report=term', '--cov-report=html','--cov-config=.coveragerc']
self.test_suite = True
def run_tests(self):
import pytest
try:
errcode = pytest.main(self.test_args_cov)
except:
errcode = pytest.main(self.test_args)
| sys.exit(errcode)
setup(
name='dipde',
version=dipde.__version__,
url='https://github.com/AllenBrainAtlas/DiPDE',
author='Nicholas Cain',
tests_require=['pytest'],
install_requires=[],
cmdclass={'test': PyTest},
author_email='nicholasc@alleninstitute.org',
description='Numerical solver for coupled population density equations',
long_description='',
packages=prepend_fi | nd_packages('dipde'),
include_package_data=True,
package_data={'':['*.md', '*.txt', '*.cfg']},
platforms='any',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'License :: Apache Software License :: 2.0',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
],
extras_require={
'testing': ['pytest'],
}
)
|
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from typing import (
Any,
AsyncIterator,
Awaitable,
Callable,
Sequence,
Tuple,
Optional,
Iterator,
)
from google.cloud.compute_v1.types import compute
class ListPager:
"""A pager for iterating through ``list`` requests.
This class thinly wraps an initial
:class:`google.cloud.compute_v1.types.NotificationEndpointList` object, and
provides an ``__iter__`` method to iterate through its
``items`` field.
If there are more pages, the ``__iter__`` method will make additional
``List`` requests and continue to iterate
through | the ``items`` field | on the
corresponding responses.
All the usual :class:`google.cloud.compute_v1.types.NotificationEndpointList`
attributes are available on the pager. If multiple requests are made, only
the most recent response is retained, and thus used for attribute lookup.
"""
def __init__(
self,
method: Callable[..., compute.NotificationEndpointList],
request: compute.ListRegionNotificationEndpointsRequest,
response: compute.NotificationEndpointList,
*,
metadata: Sequence[Tuple[str, str]] = ()
):
"""Instantiate the pager.
Args:
method (Callable): The method that was originally called, and
which instantiated this pager.
request (google.cloud.compute_v1.types.ListRegionNotificationEndpointsRequest):
The initial request object.
response (google.cloud.compute_v1.types.NotificationEndpointList):
The initial response object.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
self._method = method
self._request = compute.ListRegionNotificationEndpointsRequest(request)
self._response = response
self._metadata = metadata
def __getattr__(self, name: str) -> Any:
return getattr(self._response, name)
@property
def pages(self) -> Iterator[compute.NotificationEndpointList]:
yield self._response
while self._response.next_page_token:
self._request.page_token = self._response.next_page_token
self._response = self._method(self._request, metadata=self._metadata)
yield self._response
def __iter__(self) -> Iterator[compute.NotificationEndpoint]:
for page in self.pages:
yield from page.items
def __repr__(self) -> str:
return "{0}<{1!r}>".format(self.__class__.__name__, self._response)
|
__author_ | _ = 'Pe | r'
|
#!/usr/bin/env python
import TransferErrors as TE
import cPickle as pic | kle
with open('stuck.pkl','rb') as pklfile:
stuck = pickle.load(pklfile)
TE.makeBasicTable(stuck,TE | .workdir+'html/table.html',TE.webdir+'table.html')
TE.makeCSV(stuck,TE.webdir+'data.csv')
for basis in [-6,-5,-4,-3,-1,1,2]:
TE.makeJson(stuck,TE.webdir+('stuck_%i'%basis).replace('-','m')+'.json',basis)
|
"""
Certificate generation module.
"""
from OpenSSL import crypto
TYPE_RSA = crypto.TYPE_RSA
TYPE_DSA = crypto.TYPE_DSA
def createKeyPair(type, bits):
"""
Create a public/private key pair.
Arguments: type - Key type, must be one of TYPE_RSA and TYPE_DSA
bits - Number of bits to use in the key
Returns: The public/private key pair in a PKey object
"""
pkey = crypto.PKey()
pkey.generate_key(type, bits)
return pkey
def createCertRequest(pkey, digest="sha256", **name):
"""
Create a certificate request.
Arguments: pkey - The key to associate with the request
digest - Digestion method to use for signing, default is md5
**name - The name of the subject of the request, possible
arguments are:
C - C | ountry name
ST - State or province name
L - Locality name
O - Organization name
OU - Organizational unit name
CN | - Common name
emailAddress - E-mail address
Returns: The certificate request in an X509Req object
"""
req = crypto.X509Req()
subj = req.get_subject()
for (key,value) in name.items():
setattr(subj, key, value)
req.set_pubkey(pkey)
req.sign(pkey, digest)
return req
def createCertificate(req, (issuerCert, issuerKey), serial, (notBefore, notAfter), digest="sha256"):
"""
Generate a certificate given a certificate request.
Arguments: req - Certificate reqeust to use
issuerCert - The certificate of the issuer
issuerKey - The private key of the issuer
serial - Serial number for the certificate
notBefore - Timestamp (relative to now) when the certificate
starts being valid
notAfter - Timestamp (relative to now) when the certificate
stops being valid
digest - Digest method to use for signing, default is md5
Returns: The signed certificate in an X509 object
"""
cert = crypto.X509()
cert.set_serial_number(serial)
cert.gmtime_adj_notBefore(notBefore)
cert.gmtime_adj_notAfter(notAfter)
cert.set_issuer(issuerCert.get_subject())
cert.set_subject(req.get_subject())
cert.set_pubkey(req.get_pubkey())
cert.sign(issuerKey, digest)
return cert
|
from threading import Thread
import pickle
import time
from pymouse import PyMouse
# from evdev import InputDevice, ecodes, UInput
from evdev import UInput, ecodes
class History(Thread):
def __init__(self):
Thread.__init__(self)
self.n = 0
self.st = | True # Stop
self.sl = False # Sleep
self.ui = UInput()
self.history = []
self.m = PyMouse()
self.flag = True
self.rec = False
def send_event(self):
i = 0
while i < len(self.history):
now = self.history[i]
if i < len(self.history)-1: after = self.history[i+1]
if self.st: break
if not self.sl:
| self.m.move( now.get("mouse")[0], now.get("mouse")[1])
if now.get("event").type == ecodes.EV_KEY:
self.ui.write(ecodes.EV_KEY, now.get("event").code, now.get("event").value)
self.ui.syn()
if i < len(self.history):
time.sleep(float(after.get("event").sec - now.get("event").sec)+float(after.get("event").usec - now.get("event").usec)/1000000)
i += 1
# sobrescrevendo o metodo run()
def run(self):
while self.flag:
if not self.st:
if self.n == 0:
while not self.stop:
self.send_event()
elif self.n > 0:
for i in range(self.n):
self.send_event()
self.st = True
# print self.history
print("\nEnd")
def exit(self):
self.stop()
self.flag = False
def play(self):
self.st = False
def stop(self):
print("Stop")
self.st = True
def sleep(self):
if self.sl:
print("Play")
else:
print("Pause")
self.sl = not self.sleep
def reset(self):
self.history = []
def append_event(self, event):
# if event.type == ecodes.EV_KEY:
self.history.append({"mouse": self.m.position(), 'event': event})
# print(self.m.position())
# if self.rec:
def set_n(self, n):
self.n = n
def save(self):
s = raw_input("\nDigite o nome do arquivo: ")
pickle.dump( self.history, open(s, 'wb'), -1)
print("Salvo em %s" %s)
def load(self):
""" Carrega um historico. """
s = raw_input("\nDigite o nome do arquivo: ")
try:
self.history = pickle.load(open(s, 'rb'))
print("Carregado de %s" %s)
except FileNotFoundError:
print("Arquivo inexistente!")
if __name__ == '__main__':
h = History() |
# -*- coding: utf-8 -*-
# License | AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from . import t | est_account_payment_transfer_reconcile_batch
|
class PlotEnum():
| point = 'POINT',
line = | 'LINE',
bar = 'BAR' |
:param test: The test that has been skipped.
:param err: The exc_info of the error that was raised.
:return: None
"""
# This is the python 2.7 implementation
self.expectedFailures.append(
(test, self._err_details_to_string(test, err, details)))
def addError(self, test, err=None, details=None):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
:param details: Alternative way to supply details about the outcome.
see the class docstring for more information.
"""
self.errors.append((test,
self._err_details_to_string(test, err, details)))
def addFailure(self, test, err=None, details=None):
"""Called when an error has occurred. 'err' is a tuple of values as
returned by sys.exc_info().
:param details: Alternative way to supply details about the outcome.
see the class docstring for more information.
"""
self.failures.append((test,
self._err_details_to_string(test, err, details)))
    def addSkip(self, test, reason=None, details=None):
        """Called when a test has been skipped rather than running.
        Like with addSuccess and addError, testStopped should still be called.
        This must be called by the TestCase. 'addError' and 'addFailure' will
        not call addSkip, since they have no assumptions about the kind of
        errors that a test can raise.
        :param test: The test that has been skipped.
        :param reason: The reason for the test being skipped. For instance,
            u"pyGL is not available".
        :param details: Alternative way to supply details about the outcome.
            see the class docstring for more information.
        :return: None
        """
        if reason is None:
            # NOTE(review): if both `reason` and `details` are None this
            # raises AttributeError on `details.get` — confirm callers always
            # pass one of them.
            reason = details.get('reason')
            if reason is None:
                reason = 'No reason given'
            else:
                # The details value is a content object; flatten its text.
                reason = ''.join(reason.iter_text())
        # Group skipped tests by reason string.
        skip_list = self.skip_reasons.setdefault(reason, [])
        skip_list.append(test)
    def addSuccess(self, test, details=None):
        """Called when a test succeeded."""
        # Intentionally a no-op: successes are not recorded individually.
def addUnexpectedSuccess(self, test, details=None):
"""Called when a test was expected to fail, but succeed."""
self.unexpectedSuccesses.append(test)
def wasSuccessful(self):
"""Has this result been successful so far?
If there have been any errors, failures or unexpected successes,
return False. Otherwise, return True.
Note: This differs from standard unittest in that we consider
unexpected successes to be equivalent to failures, rather than
successes.
"""
return not (self.errors or self.failures or self.unexpectedSuccesses)
    # Select a unicode-returning traceback formatter at class-creation time.
    if str_is_unicode:
        # Python 3 and IronPython strings are unicode, use parent class method
        _exc_info_to_unicode = unittest.TestResult._exc_info_to_string
    else:
        # For Python 2, need to decode components of traceback according to
        # their source, so can't use traceback.format_exception
        # Here follows a little deep magic to copy the existing method and
        # replace the formatter with one that returns unicode instead
        from types import FunctionType as __F, ModuleType as __M
        __f = unittest.TestResult._exc_info_to_string.im_func
        __g = dict(__f.func_globals)
        # Fake 'traceback' module whose format_exception yields unicode.
        __m = __M("__fake_traceback")
        __m.format_exception = _format_exc_info
        __g["traceback"] = __m
        # Rebuild the original function body against the patched globals.
        _exc_info_to_unicode = __F(__f.func_code, __g, "_exc_info_to_unicode")
        del __F, __M, __f, __g, __m
def _err_details_to_string(self, test, err=None, details=None):
"""Convert an error in exc_info form or a contents dict to a string."""
if err is not None:
return self._exc_info_to_unicode(err, test)
return _details_to_str(details, special='traceback')
def _now(self):
"""Return the current 'test time'.
If the time() method has not been called, this is equivalent to
datetime.now(), otherwise its the last supplied datestamp given to the
time() method.
"""
if self.__now is None:
return datetime.datetime.now(utc)
else:
return self.__now
    def startTestRun(self):
        """Called before a test run starts.
        New in Python 2.7. The testtools version resets the result to a
        pristine condition ready for use in another test run. Note that this
        is different from Python 2.7's startTestRun, which does nothing.
        """
        # Re-running __init__ resets errors/failures and timing state.
        super(TestResult, self).__init__()
        self.skip_reasons = {}
        self.__now = None
        # -- Start: As per python 2.7 --
        self.expectedFailures = []
        self.unexpectedSuccesses = []
        # -- End: As per python 2.7 --
    def stopTestRun(self):
        """Called after a test run completes
        New in python 2.7
        """
        # Intentionally a no-op; subclasses may override.
    def time(self, a_datetime):
        """Provide a timestamp to represent the current time.
        This is useful when test activity is time delayed, or happening
        concurrently and getting the system time between API calls will not
        accurately represent the duration of tests (or the whole run).
        Calling time() sets the datetime used by the TestResult object.
        Time is permitted to go backwards when using this call.
        :param a_datetime: A datetime.datetime object with TZ information or
            None to reset the TestResult to gathering time from the system.
        """
        # None re-enables real system time in _now().
        self.__now = a_datetime
    def done(self):
        """Called when the test runner is done.
        deprecated in favour of stopTestRun.
        """
        # Intentionally a no-op, kept for backwards compatibility.
class MultiTestResult(TestResult):
    """A test result that dispatches to many test results.

    Every TestResult API call is fanned out, in order, to each of the
    wrapped results; the tuple of their return values is returned.
    """
    def __init__(self, *results):
        TestResult.__init__(self)
        # Wrap each result so both extended and legacy result APIs work.
        self._results = list(map(ExtendedToOriginalDecorator, results))
    def __repr__(self):
        return '<%s (%s)>' % (
            self.__class__.__name__, ', '.join(map(repr, self._results)))
    def _dispatch(self, message, *args, **kwargs):
        # Forward the named method call to every wrapped result.
        return tuple(
            getattr(result, message)(*args, **kwargs)
            for result in self._results)
    def startTest(self, test):
        return self._dispatch('startTest', test)
    def stopTest(self, test):
        return self._dispatch('stopTest', test)
    def addError(self, test, error=None, details=None):
        return self._dispatch('addError', test, error, details=details)
    def addExpectedFailure(self, test, err=None, details=None):
        return self._dispatch(
            'addExpectedFailure', test, err, details=details)
    def addFailure(self, test, err=None, details=None):
        return self._dispatch('addFailure', test, err, details=details)
    def addSkip(self, test, reason=None, details=None):
        return self._dispatch('addSkip', test, reason, details=details)
    def addSuccess(self, test, details=None):
        return self._dispatch('addSuccess', test, details=details)
    def addUnexpectedSuccess(self, test, details=None):
        return self._dispatch('addUnexpectedSuccess', test, details=details)
    def startTestRun(self):
        return self._dispatch('startTestRun')
    def stopTestRun(self):
        return self._dispatch('stopTestRun')
    def time(self, a_datetime):
        return self._dispatch('time', a_datetime)
    def done(self):
        return self._dispatch('done')
    def wasSuccessful(self):
        """Was this result successful?
        Only returns True if every constituent result was successful.
        """
        return all(self._dispatch('wasSuccessful'))
class TextTestResult(TestResult):
"""A TestResult which outputs activity to a text stream."""
def __init__(self, stream):
"""Construct a TextTestResult writing to stream."""
super(TextTestResult, self).__init__()
self.stream = stream
self.sep1 = '=' * 70 + '\n'
self.sep2 = |
from django.conf.urls import patterns, url
from allauth.socialaccount.providers.oauth2.urls import default_urlpatterns
from .provider import FacebookProvider
from . import views
urlpatterns = default_urlpatterns(FacebookProvide | r)
urlpatterns += patterns('',
url('^facebook/login/token/$', views.login_by_token,
name="facebook_login_by_token"),
url('^facebook/c | hannel/$', views.channel, name='facebook_channel'),
)
|
import re
from decimal import Decimal
import sys
import struct
from rdbtools.parser import RdbCallback, RdbParser
# Characters that must be escaped inside a JSON string (note the Python 2
# `ur''` literal: this module is Python 2 only).
ESCAPE = re.compile(ur'[\x00-\x1f\\"\b\f\n\r\t\u2028\u2029]')
# Characters that must be escaped in ASCII-only output (quote, backslash,
# anything outside printable ASCII).
ESCAPE_ASCII = re.compile(r'([\\"]|[^\ -~])')
# Detects non-ASCII bytes (possible UTF-8) in a byte string.
HAS_UTF8 = re.compile(r'[\x80-\xff]')
# Single-character escape table; extended below with \uXXXX forms for the
# remaining C0 control characters.
ESCAPE_DCT = {
    '\\': '\\\\',
    '"': '\\"',
    '\b': '\\b',
    '\f': '\\f',
    '\n': '\\n',
    '\r': '\\r',
    '\t': '\\t',
    u'\u2028': '\\u2028',
    u'\u2029': '\\u2029',
}
for i in range(0x20):
    ESCAPE_DCT.setdefault(chr(i), '\\u%04x' % (i,))
def _floatconstants():
_BYTES = '7FF80000000000007FF0000000000000'.decode('hex')
# The struct module in Python 2.4 would get frexp() out of range here
# when an endian is specified in the format string. Fixed in Python 2.5+
if sys.byteorder != 'big':
_BYTES = _BYTES[:8][::-1] + _BYTES[8:][::-1]
nan, inf = struct.unpack('dd', _BYTES)
return nan, inf, -inf
NaN, PosInf, NegInf = _floatconstants()
def _encode_basestring(s):
    """Return a JSON representation of a Python string"""
    # Python 2 only: promote UTF-8 byte strings to unicode before escaping.
    if isinstance(s, str) and HAS_UTF8.search(s) is not None:
        s = s.decode('utf-8')
    def replace(match):
        # Every match is a single char with an entry in ESCAPE_DCT.
        return ESCAPE_DCT[match.group(0)]
    return u'"' + ESCAPE.sub(replace, s) + u'"'
def _encode_basestring_ascii(s):
    """Return an ASCII-only JSON representation of a Python string
    """
    try :
        # Python 2: promote UTF-8 byte strings to unicode when possible.
        if isinstance(s, str) and HAS_UTF8.search(s) is not None:
            s = s.decode('utf-8')
    except:
        # NOTE(review): bare except deliberately keeps undecodable byte
        # strings as-is (best effort), but it also hides unrelated errors.
        pass
    def replace(match):
        s = match.group(0)
        try:
            return ESCAPE_DCT[s]
        except KeyError:
            # Not a simple escape: emit \uXXXX (or a surrogate pair for
            # code points above the BMP).
            n = ord(s)
            if n < 0x10000:
                #return '\\u{0:04x}'.format(n)
                return '\\u%04x' % (n,)
            else:
                # surrogate pair
                n -= 0x10000
                s1 = 0xd800 | ((n >> 10) & 0x3ff)
                s2 = 0xdc00 | (n & 0x3ff)
                return '\\u%04x\\u%04x' % (s1, s2)
    return '"' + str(ESCAPE_ASCII.sub(replace, s)) + '"'
def _encode(s, quote_numbers = True):
    """Encode a scalar for JSON output.

    Numbers are wrapped in quotes when `quote_numbers` is true (used for
    object keys); NaN/Infinity use the JavaScript literals regardless.
    NOTE(review): the `long` check makes this Python 2 only.
    """
    if quote_numbers:
        qn = '"'
    else:
        qn = ''
    if isinstance(s, int) or isinstance(s, long):
        return qn + str(s) + qn
    elif isinstance(s, float):
        # NaN is the only float that is != itself.
        if s != s:
            return "NaN"
        elif s == PosInf:
            return "Infinity"
        elif s == NegInf:
            return "-Infinity"
        else:
            return qn + str(s) + qn
    else:
        return _encode_basestring_ascii(s)
def encode_key(s):
    # Keys are always quoted so the emitted JSON object is valid.
    return _encode(s, quote_numbers=True)
def encode_value(s):
    # Values keep numbers bare (unquoted).
    return _encode(s, quote_numbers=False)
class JSONCallback(RdbCallback):
    """RdbParser callback that streams the RDB contents as JSON.

    Output shape: a top-level array with one object per database; inside a
    database object each key maps to its encoded value (scalar for strings,
    array for lists/sets, object for hashes/sorted sets).  Output is written
    incrementally to `out`, so punctuation state is tracked in flags.
    """
    def __init__(self, out):
        self._out = out
        self._is_first_db = True
        self._has_databases = False
        self._is_first_key_in_db = True
        self._elements_in_key = 0
        self._element_index = 0
    def start_rdb(self):
        self._out.write('[')
    def start_database(self, db_number):
        # Close the previous database object (if any) before opening a new one.
        if not self._is_first_db:
            self._out.write('},')
        self._out.write('{')
        self._is_first_db = False
        self._has_databases = True
        self._is_first_key_in_db = True
    def end_database(self, db_number):
        pass
    def end_rdb(self):
        if self._has_databases:
            self._out.write('}')
        self._out.write(']')
    def _start_key(self, key, length):
        # Separate keys with ',' and put each key on its own line.
        if not self._is_first_key_in_db:
            self._out.write(',')
        self._out.write('\r\n')
        self._is_first_key_in_db = False
        self._elements_in_key = length
        self._element_index = 0
    def _end_key(self, key):
        pass
    def _write_comma(self):
        # Emit a separating comma before elements 2..n of the current key.
        if self._element_index > 0 and self._element_index < self._elements_in_key :
            self._out.write(',')
        self._element_index = self._element_index + 1
    def set(self, key, value, expiry, info):
        self._start_key(key, 0)
        self._out.write('%s:%s' % (encode_key(key), encode_value(value)))
    def start_hash(self, key, length, expiry, info):
        self._start_key(key, length)
        self._out.write('%s:{' % encode_key(key))
    def hset(self, key, field, value):
        self._write_comma()
        self._out.write('%s:%s' % (encode_key(field), encode_value(value)))
    def end_hash(self, key):
        self._end_key(key)
        self._out.write('}')
    def start_set(self, key, cardinality, expiry, info):
        self._start_key(key, cardinality)
        self._out.write('%s:[' % encode_key(key))
    def sadd(self, key, member):
        self._write_comma()
        self._out.write('%s' % encode_value(member))
    def end_set(self, key):
        self._end_key(key)
        self._out.write(']')
    def start_list(self, key, length, expiry, info):
        self._start_key(key, length)
        self._out.write('%s:[' % encode_key(key))
    def rpush(self, key, value) :
        self._write_comma()
        self._out.write('%s' % encode_value(value))
    def end_list(self, key):
        self._end_key(key)
        self._out.write(']')
    def start_sorted_set(self, key, length, expiry, info):
        self._start_key(key, length)
        self._out.write('%s:{' % encode_key(key))
    def zadd(self, key, score, member):
        self._write_comma()
        self._out.write('%s:%s' % (encode_key(member), encode_value(score)))
    def end_sorted_set(self, key):
        self._end_key(key)
        self._out.write('}')
class DiffCallback(RdbCallback):
    '''Prints the contents of RDB in a format that is unix sort friendly,
    so that two rdb files can be diffed easily'''
    def __init__(self, out):
        self._out = out
        # _index tracks position within the current list / sorted set.
        self._index = 0
        self._dbnum = 0
    def start_rdb(self):
        pass
    def start_database(self, db_number):
        self._dbnum = db_number
    def end_database(self, db_number):
        pass
    def end_rdb(self):
        pass
    def set(self, key, value, expiry, info):
        self._out.write('db=%d %s -> %s' % (self._dbnum, encode_key(key), encode_value(value)))
        self.newline()
    def start_hash(self, key, length, expiry, info):
        pass
    def hset(self, key, field, value):
        self._out.write('db=%d %s . %s -> %s' % (self._dbnum, encode_key(key), encode_key(field), encode_value(value)))
        self.newline()
    def end_hash(self, key):
        pass
    def start_set(self, key, cardinality, expiry, info):
        pass
    def sadd(self, key, member):
        self._out.write('db=%d %s { %s }' % (self._dbnum, encode_key(key), encode_value(member)))
        self.newline()
    def end_set(self, key):
        pass
    def start_list(self, key, length, expiry, info):
        self._index = 0
    def rpush(self, key, value) :
        self._out.write('db=%d %s[%d] -> %s' % (self._dbnum, encode_key(key), self._index, encode_value(value)))
        self.newline()
        self._index = self._index + 1
    def end_list(self, key):
        pass
    def start_sorted_set(self, key, length, expiry, info):
        self._index = 0
    def zadd(self, key, score, member):
        self._out.write('db=%d %s[%d] -> {%s, score=%s}' % (self._dbnum, encode_key(key), self._index, encode_key(member), encode_value(score)))
        self.newline()
        self._index = self._index + 1
    def end_sorted_set(self, key):
        pass
    def newline(self):
        self._out.write('\r\n')
|
:
q = hawkey.Query(self.sack)
self.assertRaises(hawkey.ValueException, q.filter, flying__eq="name")
self.assertRaises(hawkey.ValueException, q.filter, flying="name")
    def test_unicode(self):
        """Unicode values work in name filters, alone and inside lists."""
        q = hawkey.Query(self.sack)
        q.filterm(name__eq=u"flying")
        self.assertEqual(q.count(), 1)
        q = hawkey.Query(self.sack)
        q.filterm(name__eq=[u"flying", "penny"])
        self.assertEqual(q.count(), 2)
    def test_count(self):
        """len() forces lazy evaluation and agrees with count()/truthiness."""
        q = hawkey.Query(self.sack).filter(name=["flying", "penny"])
        self.assertIsNone(q.result)
        self.assertEqual(len(q), 2)
        self.assertIsNotNone(q.result)
        self.assertEqual(len(q), q.count())
        self.assertTrue(q)
        q = hawkey.Query(self.sack).filter(name="naturalE")
        self.assertFalse(q)
        self.assertIsNotNone(q.result)
    def test_kwargs_check(self):
        """Combining incompatible filter kwargs raises ValueException."""
        q = hawkey.Query(self.sack)
        self.assertRaises(hawkey.ValueException, q.filter,
                          name="flying", upgrades="maracas")
    def test_kwargs(self):
        """Combined criteria, equality shortcut and the ICASE flag."""
        q = hawkey.Query(self.sack)
        # test combining several criteria
        q.filterm(name__glob="*enny*", summary__substr="eyes")
        self.assertEqual(q.count(), 1)
        # test shortcutting for equality comparison type
        q = hawkey.Query(self.sack)
        q.filterm(name="flying")
        self.assertEqual(q.count(), 1)
        # test flags parsing
        q = hawkey.Query(self.sack).filter(name="FLYING")
        self.assertEqual(q.count(), 0)
        q = hawkey.Query(self.sack).filter(hawkey.ICASE, name="FLYING")
        self.assertEqual(q.count(), 1)
    def test_in(self):
        """A substr filter accepts a list of needles (OR semantics)."""
        q = hawkey.Query(self.sack)
        q.filterm(name__substr=["ool", "enny-li"])
        self.assertEqual(q.count(), 2)
    def test_in_set(self):
        """A substr filter accepts a set of needles too."""
        q = hawkey.Query(self.sack)
        q.filterm(name__substr=set(["ool", "enny-li"]))
        self.assertEqual(q.count(), 2)
    def test_iteration(self):
        """Indexing an evaluated query yields distinct packages."""
        q = hawkey.Query(self.sack)
        q.filterm(name__substr=["penny"])
        self.assertEqual(q.count(), 2)
        self.assertNotEqual(q[0], q[1])
    def test_clone(self):
        """A clone survives deletion of the original, unevaluated query."""
        q = hawkey.Query(self.sack)
        q.filterm(name__substr=["penny"])
        q_clone = hawkey.Query(query=q)
        del q
        self.assertEqual(q_clone.count(), 2)
        self.assertNotEqual(q_clone[0], q_clone[1])
    def test_clone_with_evaluation(self):
        """Cloning an evaluated query keeps its evaluated state and results."""
        q = hawkey.Query(self.sack)
        q.filterm(name__substr="penny")
        q.run()
        q_clone = hawkey.Query(query=q)
        del q
        self.assertTrue(q_clone.evaluated)
        self.assertLength(q_clone.result, 2)
    def test_immutability(self):
        """filter() returns a new query; the original is unchanged."""
        q = hawkey.Query(self.sack).filter(name="jay")
        q2 = q.filter(evr="5.0-0")
        self.assertEqual(q.count(), 2)
        self.assertEqual(q2.count(), 1)
    def test_copy_lazyness(self):
        """Deriving a new query must not force evaluation of the original."""
        q = hawkey.Query(self.sack).filter(name="jay")
        self.assertIsNone(q.result)
        # NOTE(review): q2's own laziness is never asserted — possibly
        # intentional, since only the original's state is under test.
        q2 = q.filter(evr="5.0-0")
        self.assertIsNone(q.result)
    def test_empty(self):
        """empty=True matches nothing; empty=False is rejected."""
        q = hawkey.Query(self.sack).filter(empty=True)
        self.assertLength(q, 0)
        q = hawkey.Query(self.sack)
        self.assertRaises(hawkey.ValueException, q.filter, empty=False)
    def test_epoch(self):
        """Numeric comparison filters work on the epoch field."""
        q = hawkey.Query(self.sack).filter(epoch__gt=4)
        self.assertEqual(len(q), 1)
        self.assertEqual(q[0].epoch, 6)
    def test_version(self):
        """gte and glob comparisons work on the version field."""
        q = hawkey.Query(self.sack).filter(version__gte="5.0")
        self.assertEqual(len(q), 3)
        q = hawkey.Query(self.sack).filter(version__glob="1.2*")
        self.assertLength(q, 2)
    def test_package_in(self):
        """The pkg filter restricts to a given package list and composes."""
        pkgs = list(hawkey.Query(self.sack).filter(name=["flying", "penny"]))
        q = hawkey.Query(self.sack).filter(pkg=pkgs)
        self.assertEqual(len(q), 2)
        q2 = q.filter(version__gt="3")
        self.assertEqual(len(q2), 1)
    def test_nevra_match(self):
        """Glob matching against the full NEVRA string."""
        query = hawkey.Query(self.sack).filter(nevra__glob="*lib*64")
        self.assertEqual(len(query), 1)
        self.assertEqual(str(query[0]), 'penny-lib-4-1.x86_64')
    def test_repeated(self):
        """filterm() can further narrow an already-filtered query in place."""
        q = hawkey.Query(self.sack).filter(name="jay")
        q.filterm(latest_per_arch=True)
        self.assertEqual(len(q), 1)
    def test_latest(self):
        """latest_per_arch keeps one per arch; latest keeps one overall."""
        q = hawkey.Query(self.sack).filter(name="pilchard")
        q.filterm(latest_per_arch=True)
        self.assertEqual(len(q), 2)
        q.filterm(latest=True)
        self.assertEqual(len(q), 1)
    def test_reldep(self):
        """provides= accepts a Reldep; relational flags on it are rejected."""
        flying = base.by_name(self.sack, "flying")
        requires = flying.requires
        q = hawkey.Query(self.sack).filter(provides=requires[0])
        self.assertEqual(len(q), 1)
        self.assertEqual(str(q[0]), "penny-lib-4-1.x86_64")
        self.assertRaises(hawkey.QueryException, q.filter,
                          provides__gt=requires[0])
    def test_reldep_list(self):
        """provides= also accepts a list of Reldeps (here: obsoletes)."""
        self.sack.load_test_repo("updates", "updates.repo")
        fool = base.by_name_repo(self.sack, "fool", "updates")
        q = hawkey.Query(self.sack).filter(provides=fool.obsoletes)
        self.assertEqual(str(q.run()[0]), "penny-4-1.noarch")
    def test_disabled_repo(self):
        """Disabling a repo hides its packages; re-enabling restores them."""
        self.sack.disable_repo(hawkey.SYSTEM_REPO_NAME)
        q = hawkey.Query(self.sack).filter(name="jay")
        self.assertLength(q.run(), 0)
        self.sack.enable_repo(hawkey.SYSTEM_REPO_NAME)
        q = hawkey.Query(self.sack).filter(name="jay")
        self.assertLength(q.run(), 2)
    def test_multiple_flags(self):
        """Chained comparison flags (glob + not) combine correctly."""
        q = hawkey.Query(self.sack).filter(name__glob__not=["p*", "j*"])
        self.assertItemsEqual(list(map(lambda p: p.name, q.run())),
                              ['baby', 'dog', 'flying', 'fool', 'gun', 'tour'])
class TestQueryAllRepos(base.TestCase):
    """Queries that span the system repo plus 'main' and 'updates'."""
    def setUp(self):
        # Load all three repos so cross-repo matching can be exercised.
        self.sack = base.TestSack(repo_dir=self.repo_dir)
        self.sack.load_system_repo()
        self.sack.load_test_repo("main", "main.repo")
        self.sack.load_test_repo("updates", "updates.repo")
    def test_requires(self):
        """requires= matches both exact and ranged Reldeps."""
        reldep = hawkey.Reldep(self.sack, "semolina = 2")
        q = hawkey.Query(self.sack).filter(requires=reldep)
        self.assertItemsEqual(list(map(str, q.run())),
                              ['walrus-2-5.noarch', 'walrus-2-6.noarch'])
        reldep = hawkey.Reldep(self.sack, "semolina > 1.0")
        q = hawkey.Query(self.sack).filter(requires=reldep)
        self.assertItemsEqual(list(map(str, q.run())),
                              ['walrus-2-5.noarch', 'walrus-2-6.noarch'])
    def test_obsoletes(self):
        """obsoletes= finds the package obsoleting a versioned Reldep."""
        reldep = hawkey.Reldep(self.sack, "penny < 4-0")
        q = hawkey.Query(self.sack).filter(obsoletes=reldep)
        self.assertItemsEqual(list(map(str, q.run())), ['fool-1-5.noarch'])
    def test_downgradable(self):
        """downgradable=True lists installed packages with older candidates."""
        query = hawkey.Query(self.sack).filter(downgradable=True)
        self.assertEqual({str(pkg) for pkg in query},
                         {'baby-6:5.0-11.x86_64', 'jay-5.0-0.x86_64'})
class TestQueryUpdates(base.TestCase):
    def setUp(self):
        # System repo plus an 'updates' repo, for upgrade-related filters.
        self.sack = base.TestSack(repo_dir=self.repo_dir)
        self.sack.load_system_repo()
        self.sack.load_test_repo("updates", "updates.repo")
    def test_upgradable(self):
        """upgradable=True lists installed packages that have upgrades."""
        query = hawkey.Query(self.sack).filter(upgradable=True)
        self.assertEqual({str(pkg) for pkg in query},
                         {'dog-1-1.x86_64', 'flying-2-9.noarch',
                          'fool-1-3.noarch', 'pilchard-1.2.3-1.i686',
                          'pilchard-1.2.3-1.x86_64'})
    def test_updates_noarch(self):
        """upgrades filter on a noarch package returns all candidates."""
        q = hawkey.Query(self.sack)
        q.filterm(name="flying", upgrades=1)
        self.assertEqual(q.count(), 3)
    def test_updates_arch(self):
        """upgrades filter respects the installed package's arch."""
        q = hawkey.Query(self.sack)
        pilchard = q.filter(name="dog", upgrades=True)
        self.assertItemsEqual(list(map(str, pilchard.run())), ['dog-1-2.x86_64'])
def test_glob_arch(self):
q = hawkey.Query(self.sack)
pilchard = q.filter(name="pilchard", version="1.2.4", release="1",
arch__ |
ffix for filtering the variables to return.
Returns:
a list of variables in the trainable collection with scope and suffix.
"""
return get_variables(scope, suffix, ops.GraphKeys.TRAINABLE_VARIABLES)
def get_variables_to_restore(include=None, exclude=None):
  """Gets the list of the variables to restore.
  Args:
    include: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to include. None would include all
      the variables.
    exclude: an optional list/tuple of scope strings for filtering which
      variables from the VARIABLES collection to exclude. None it would not
      exclude any.
  Returns:
    a list of variables to restore.
  Raises:
    TypeError: include or exclude is provided but is not a list or a tuple.
  """
  if include is None:
    # No include filter: start from every variable.
    vars_to_include = get_variables()
  else:
    if not isinstance(include, (list, tuple)):
      raise TypeError('include is provided but is not a list or a tuple.')
    vars_to_include = []
    for scope in include:
      vars_to_include.extend(get_variables(scope))
  excluded = set()
  if exclude is not None:
    if not isinstance(exclude, (list, tuple)):
      raise TypeError('exclude is provided but is not a list or a tuple.')
    for scope in exclude:
      excluded.update(get_variables(scope))
  # Preserve the inclusion order while dropping excluded variables.
  return [v for v in vars_to_include if v not in excluded]
def get_variables_by_suffix(suffix, scope=None):
  """Gets the list of variables that end with the given suffix.
  Args:
    suffix: suffix for filtering the variables to return.
    scope: an optional scope for filtering the variables to return.
  Returns:
    a copied list of variables with the given suffix and scope.
  """
  # Thin convenience wrapper over get_variables.
  return get_variables(scope=scope, suffix=suffix)
def get_variables_by_name(given_name, scope=None):
  """Gets the list of variables that were given that name.
  Args:
    given_name: name given to the variable without any scope.
    scope: an optional scope for filtering the variables to return.
  Returns:
    a copied list of variables with the given name and scope.
  """
  # Match 'name:0' either nested under a scope ('/name:') or at the top
  # level ('^name:').
  suffix = '/' + given_name + ':|^' + given_name + ':'
  return get_variables(scope=scope, suffix=suffix)
def get_unique_variable(var_op_name):
  """Gets the variable uniquely identified by that var_op_name.
  Args:
    var_op_name: the full name of the variable op, including the scope.
  Returns:
    a tensorflow variable.
  Raises:
    ValueError: if no variable uniquely identified by the name exists.
  """
  candidates = get_variables(scope=var_op_name)
  if not candidates:
    raise ValueError('Couldnt find variable %s' % var_op_name)
  for candidate in candidates:
    if candidate.op.name == var_op_name:
      return candidate
  # Bug fix: the original passed var_op_name as a second positional
  # argument to ValueError instead of %-formatting it into the message,
  # so the name never appeared formatted in the error.
  raise ValueError('Variable %s does not uniquely identify a variable'
                   % var_op_name)
def assign_from_values(var_names_to_values):
  """Creates an assignment operation from a given mapping.
  This function provides a mechanism for performing assignment of variables
  to values in a way that does not fill the graph with large assignment values.
  Args:
    var_names_to_values: A map from variable names to values.
  Returns:
    assign_op: An `Operation` that assigns each of the given variables to the
      requested values.
    feed_dict: The feed dictionary to use when evaluating `assign_op`.
  Raises:
    ValueError: if any of the given variable names were not found.
  """
  feed_dict = {}
  assign_ops = []
  for var_name in var_names_to_values:
    var_value = var_names_to_values[var_name]
    var = ops.get_collection(ops.GraphKeys.GLOBAL_VARIABLES, var_name)
    if not var:
      # Bug fix: the original passed var_name as a second positional
      # argument to ValueError instead of %-formatting it.
      raise ValueError('Variable %s wasnt found' % var_name)
    elif len(var) > 1:
      # tf.get_collection is just a filter on the prefix: find the exact match:
      found = False
      for v in var:
        if v.op.name == var_name:
          var = v
          found = True
          break
      if not found:
        # Bug fix: same %-formatting defect as above.
        raise ValueError('Variable %s doesnt uniquely identify a variable'
                         % var_name)
    else:
      var = var[0]
    # TODO(nsilberman): ensure placeholder and assign are on the same device.
    # Assign a placeholder to the value that will be filled later.
    placeholder_name = 'placeholder/' + var.op.name
    placeholder_value = array_ops.placeholder(
        dtype=var.dtype.base_dtype,
        shape=var.get_shape(),
        name=placeholder_name)
    assign_ops.append(var.assign(placeholder_value))
    feed_dict[placeholder_value] = var_value.reshape(var.get_shape())
  # Group all assignments into a single op callers can run once.
  assign_op = control_flow_ops.group(*assign_ops)
  return assign_op, feed_dict
def assign_from_values_fn(var_names_to_values):
  """Returns a function that assigns specific variables from the given values.
  This function provides a mechanism for performing assignment of variables
  to values in a way that does not fill the graph with large assignment values.
  Args:
    var_names_to_values: A map from variable names to values.
  Returns:
    A function that takes a single argument, a `tf.Session`, that applies the
    assignment operation.
  Raises:
    ValueError: if any of the given variable names were not found.
  """
  assign_op, feed_dict = assign_from_values(var_names_to_values)
  def callback(session):
    # Runs the grouped assign op with the placeholder feed built above.
    return session.run(assign_op, feed_dict)
  return callback
# pylint: disable=protected-access
# Currently variable_scope doesn't provide very good APIs to access
# all variables under scope and retrieve and check existing scopes.
#
# TODO(nsilberman): add flag to load exponential moving averages instead
def assign_from_checkpoint(model_path, var_list):
"""Creates an operation to assign specific variables from a checkpoint.
Args:
model_path: The full path to the model checkpoint. To get latest checkpoint
use `model_path = tf.train.latest_checkpoint(checkpoint_dir)`
var_list: A list of (possibly partitioned) `Variable` objects
or a dictionary mapping names in the checkpoint to the
corresponding variables or list of variables to initialize
from that checkpoint value. For partitioned Variables, the
name in the checkpoint must be the full variable, not the
name of the partitioned variable, eg. "my_var" rather than
"my_var/part_4". If empty, returns no_op(), {}.
Returns:
the restore_op and the feed_dict that need to be run to restore var_list.
Raises:
ValueError: If the checkpoint specified at `model_path` is missing one of
the variables in `var_list`.
"""
# Normalize var_list into a dictionary mapping names in the
# checkpoint to the list of variables to initialize from that
# checkpoint variable. Sliced (including partitioned) variables will
# end up under the same key.
grouped_vars = {}
if isinstance(var_list, (tuple, list)):
for var in var_list:
if var._save_slice_info:
ckpt_name = var._save_slice_info.full_name
else:
ckpt_name = var.op.name
if ckpt_name not in grouped_vars:
grouped_vars[ckpt_name] = []
grouped_vars[ckpt_name].append(var)
else:
for ckpt_name, value in var_list.iteritems():
if isinstance(value, (tuple, list)):
grouped_vars[ckpt_name] = value
else:
grouped_vars[ckpt_name] = [value]
# Read each checkpoint entry. Create a placeholder variable and
# add the (possibly sliced) data from the checkpoint to the feed_dict.
reader = pywrap_tensorflow.NewCheckpointReader(model_path)
feed_dict = {}
assign_ops = []
for ckpt_name in grouped_vars:
if not reader.has_tensor(ckpt_name):
raise ValueError(
'Checkpoint is missing variable [%s]' % ckpt_name)
ckpt_value = reader.get_tensor(ckpt_name)
for var in grouped_vars[ckpt_name]:
placeholder_tensor = array_ops.placeholder(
dtype=var.dtype.base_dtype,
shape=var.get_shape(),
name='placeholder/' + ckpt_name)
assign_ops.append(var.assign(placeholder_tensor))
if not var._save_slice_info:
|
import numpy as np
class LinfinityRegression:
    """Gradient-descent linear regression with an L∞-style penalty.

    NOTE(review): the "soft thresholding" step below does not match the
    textbook prox operator for either the L1 or the L∞ norm — confirm the
    intended regularizer against the original derivation.
    """
    def __init__(self, iterations, learning_rate, regularization_strength):
        # Number of gradient-descent steps.
        self.iterations = iterations
        # Step size for each weight update.
        self.learning_rate = learning_rate
        # Penalty coefficient used by the thresholding operator.
        self.regularization_strength = regularization_strength
    @staticmethod
    def soft_thresholding_operator(x, l):
        """
        This method is used to update the weights when performing Gradient Descent.
        Whenever the loss function is just the least square loss function, we can minimize by taking the derivative.
        However, we cannot minimise the Lasso Loss function in the same weight because the function is not differentiable
        at w = 0 (where w is any of the weight component)
        :param x:
        :param l:
        :return:
        """
        # maxw is the (signed) maximum component; components whose magnitude
        # exceeds |maxw| (possible for large negative entries) are clipped to
        # l*sign(x[i]); strictly smaller magnitudes become l*sign(maxw).
        # NOTE(review): elements with |x[i]| == |maxw| are left unchanged —
        # confirm this is intended.
        maxw = max(x)
        for i in range(0, len(x)):
            if np.abs(x[i]) > np.abs(maxw):
                x[i] = l*np.sign(x[i])
            elif np.abs(x[i]) < np.abs(maxw) :
                x[i] = l*np.sign(maxw)
        return x
    @staticmethod
    def mse_cost_function(predicted_output, actual_output):
        """
        This method calculates the error and the MSE cost function given a predicted_value and the actual_value
        :param predicted_output:
        :param actual_output:
        :return: Mean Square Error, Error.
        """
        error = predicted_output - actual_output
        mse_cost = np.sum(error ** 2) /(2*len(actual_output))
        return mse_cost, error
    def calculate_weights(self, training_records, output):
        """Run gradient descent; return (weights history, costs, predictions)."""
        mse_costs = []
        # Random initial weights, one per feature column.
        weights = np.random.rand(training_records.shape[1])
        weights_table = [weights]
        predicted_outputs = []
        for i in range(self.iterations):
            predicted_output = np.dot(training_records, weights)
            predicted_outputs.append(predicted_output)
            mse_cost, error = LinfinityRegression.mse_cost_function(predicted_output, output)
            mse_costs.append(mse_cost)
            # Gradient of the MSE term with respect to the weights.
            slope = training_records.T.dot(error)/(len(output))
            weights = LinfinityRegression.soft_thresholding_operator(weights - self.learning_rate*slope,
                                                                     self.regularization_strength)
            weights_table.append(weights.copy())
        return weights_table, mse_costs, predicted_outputs
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE | SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the shared park-bench loot-schematic Tangible template."""
	result = Tangible()
	result.template = "object/tangible/loot/loot_schematic/shared_park_bench_schematic.iff"
	result.attribute_template_id = -1
	result.stfName("craft_item_ingredients_n","park_bench")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a co | py of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from ctypes import sizeof, windll, addressof, create_unicode_buffer
from ctypes.wintypes import DWORD, HANDLE
# Win32 process access rights (see winnt.h / OpenProcess docs).
PROCESS_TERMINATE = 0x0001
PROCESS_QUERY_INFORMATION = 0x0400
PROCESS_VM_READ = 0x0010
def get_pids(process_name):
    """Return the PIDs of all processes whose image name starts with
    `process_name` (Windows only, via psapi.EnumProcesses)."""
    BIG_ARRAY = DWORD * 4096
    processes = BIG_ARRAY()
    needed = DWORD()
    pids = []
    result = windll.psapi.EnumProcesses(processes,
                                        sizeof(processes),
                                        addressof(needed))
    if not result:
        return pids
    # Bug fix: use floor division.  On Python 3, `/` yields a float and
    # range(float) raises TypeError; `//` behaves identically on Python 2.
    num_results = needed.value // sizeof(DWORD)
    for i in range(num_results):
        pid = processes[i]
        process = windll.kernel32.OpenProcess(PROCESS_QUERY_INFORMATION |
                                              PROCESS_VM_READ,
                                              0, pid)
        if process:
            module = HANDLE()
            result = windll.psapi.EnumProcessModules(process,
                                                     addressof(module),
                                                     sizeof(module),
                                                     addressof(needed))
            if result:
                name = create_unicode_buffer(1024)
                result = windll.psapi.GetModuleBaseNameW(process, module,
                                                         name, len(name))
                # TODO: This might not be the best way to
                # match a process name; maybe use a regexp instead.
                if name.value.startswith(process_name):
                    pids.append(pid)
                windll.kernel32.CloseHandle(module)
            windll.kernel32.CloseHandle(process)
    return pids
def kill_pid(pid):
    """Forcefully terminate the process with the given PID (Windows only).

    Silently does nothing when the process cannot be opened (already gone
    or insufficient rights).
    """
    process = windll.kernel32.OpenProcess(PROCESS_TERMINATE, 0, pid)
    if process:
        windll.kernel32.TerminateProcess(process, 0)
        windll.kernel32.CloseHandle(process)
|
nMegaList:
name = a[0]
codeName = a[0].lower().replace(" ", "_")
shortDesc = (a[3][:60] + '...') if len(a[3]) > 60 else a[3]
icon = ("package-x-generic") if iconTheme.has_icon(defaultIconTheme, a[4]) == False else a[4]
installMethod = a[5]
sniqtPrefix = a[1]
if isinstance(a[2], list):
for checkLocation in a[2]:
print checkLocation
if os.path.isfile(checkLocation):
enabled = True
break
else:
enabled = False
else:
enabled = (True) if os.path.isfile(a[2]) or codeName == "core_icon_theme" else False
checkIfInstalled(codeName)
availableComponents.append([name, codeName, shortDesc, icon, installMethod, sniqtPrefix, enabled])
availableComponents.sort(key=lambda x: x[6], reverse=True)
print "installed", installedComponents
class InstallerWindow(Gtk.Window):
    def __init__(self):
        """Build the main configurator window: header bar, search toggle,
        notifications, and the component list UI."""
        Gtk.Window.__init__(self, title=appName)
        self.set_size_request(500, 500)
        self.set_icon_name("preferences-desktop")
        # Non-zero when an install/removal step failed.
        self.error = 0
        self.hb = Gtk.HeaderBar()
        self.hb.set_show_close_button(True)
        self.hb.props.title = "elementary+"
        self.hb.set_subtitle("Configurator")
        self.set_titlebar(self.hb)
        # Search toggle in the header bar drives the in-window search bar.
        searchIcon = Gtk.Image.new_from_icon_name("edit-find-symbolic", Gtk.IconSize.LARGE_TOOLBAR)
        searchButton = Gtk.ToggleButton()
        searchButton.set_image(searchIcon)
        searchButton.connect('clicked', self.search_handler)
        self.searchButton = searchButton
        self.hb.pack_start(searchButton)
        Notify.init(appName)
        self.add(self.build_ui())
        # Strip the search bar's border via app-priority CSS.
        style_provider = Gtk.CssProvider()
        css = """
        .search-bar {
            border-width: 0;
        }
        """
        style_provider.load_from_data(css)
        Gtk.StyleContext.add_provider_for_screen(
            Gdk.Screen.get_default(),
            style_provider,
            Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION
        )
def notify(self, messageOne, messageTwo, icon):
try:
notification = Notify.Notification.new(messageOne, messageTwo, icon)
notification.set_urgency(1)
notification.show()
del notification
except:
pass
    def build_ui(self):
        """Assemble the window content: a search bar stacked above the
        scrollable component list.  Returns the top-level vbox."""
        vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=0)
        self.searchBar = Gtk.SearchBar()
        self.searchBar.get_style_context().add_class("primary-toolbar")
        self.searchBar.set_halign(Gtk.Align.FILL)
        self.searchBar.set_show_close_button(True)
        entry = Gtk.SearchEntry()
        entry.connect("search-changed", self.search_changed)
        self.searchBar.add(entry)
        self.searchBar.connect_entry(entry)
        vbox.pack_start(self.searchBar, False, False, 0)
        self.searchEntry = entry
        # Forward key presses so typing anywhere starts a search.
        self.connect("key-press-event", lambda x, y: self.searchBar.handle_event(y))
        iconsPage = self.create_icons_page()
        vbox.pack_start(iconsPage, True, True, 0)
        return vbox
    def create_icons_page(self):
        """Build the scrollable ListBox of components, one row per entry of
        the module-level ``availableComponents`` list, each with a toggle
        switch wired to ``self.callback``.  Returns the scroller widget."""
        scroller = Gtk.ScrolledWindow(None, None)
        scroller.set_border_width(10)
        scroller.set_shadow_type(Gtk.ShadowType.IN)
        scroller.set_policy(Gtk.PolicyType.NEVER, Gtk.PolicyType.AUTOMATIC)
        self.lbox = Gtk.ListBox()
        self.lbox.set_selection_mode(Gtk.SelectionMode.NONE)
        # Placeholder label shown when the filter yields no rows.
        placeholder = Gtk.Label()
        self.placeholder = placeholder
        placeholder.set_use_markup(True)
        placeholder.get_style_context().add_class("dim-label")
        self.lbox.set_placeholder(placeholder)
        placeholder.show_all()
        scroller.add(self.lbox)
        for i in range(len(availableComponents)):
            # Look up the long description by matching component name.
            # NOTE(review): if no sublist matches, longDesc keeps the value
            # from the previous iteration (or is unbound on the first) --
            # confirm iconMegaList always contains every component.
            for sublist in iconMegaList:
                if sublist[0] == availableComponents[i][0]:
                    longDesc = sublist[3]
            item = self.create_item(availableComponents[i][0], availableComponents[i][3], availableComponents[i][2], availableComponents[i][6])
            componentSwitch = Gtk.Switch()
            componentSwitch.set_name(availableComponents[i][0].lower())
            componentSwitch.props.halign = Gtk.Align.END
            componentSwitch.props.valign = Gtk.Align.CENTER
            # Extra connect() args become callback(widget, pspec, codeName,
            # installMethod, sniqtPrefix).
            componentSwitch.connect("notify::active", self.callback, availableComponents[i][1], availableComponents[i][4], availableComponents[i][5])
            if availableComponents[i][1] in installedComponents:
                componentSwitch.set_active(True)
            # The core icon theme is "installed" when it is the active theme.
            if availableComponents[i][1] == "core_icon_theme":
                currentTheme = systemSettings.get_string("icon-theme")
                if currentTheme == iconThemeName:
                    componentSwitch.set_active(True)
            wrap = Gtk.HBox(0)
            wrap.pack_start(item, True, True, 0)
            wrap.pack_end(componentSwitch, False, False, 2)
            # Index 6 is the "enabled" flag; grey out unavailable components.
            if availableComponents[i][6] is False:
                wrap.set_sensitive(False)
            wrap.set_tooltip_text(longDesc)
            self.lbox.add(wrap)
        return scroller
def create_item(self, name, iconName, shortDesc, enabled):
grid = Gtk.Grid()
grid.set_border_width(16)
grid.set_row_spacing(4)
grid.set_column_spacing(16)
if enabled is True:
label = Gtk.Label("<big>%s</big>" % name)
else:
label = Gtk.Label("<big>%s (Not installed)</big>" % name)
label.set_use_markup(True)
label.set_alignment(0.0, 0.5)
icon = Gtk.Image.new_from_icon_name(iconName, Gtk.IconSize.DIALOG)
desc = Gtk.Label(shortDesc)
desc.get_style_context().add_class("dim-label")
desc.set_alignment(0.0, 0.5)
grid.attach(icon, 0, 0, 1, 2)
grid.attach(label, 1, 0, 1, 1)
grid.attach(desc, 1, 1, 1, 1)
return grid
def search_handler(self, w):
w.freeze_notify()
self.searchBar.set_search_mode(w.get_active())
w.thaw_notify()
def search_changed(self, w, data=None):
text = w.get_text().strip()
if text == "":
self.searchBar.set_search_mode(False)
act = False if text == "" else True
self.searchButton.freeze_notify()
self.searchButton.set_active(act)
self.searchButton.thaw_notify()
self.searching(w)
def searching(self, entry, event=None):
text = entry.get_text().strip()
self.lbox.set_filter_func(self.filter, text)
res = False
for child in self.lbox.get_children():
if child.get_visible() and child.get_child_visible():
res = True
break
if not res:
self.placeholder.set_markup("<big>No results</big>")
def filter(self, row, text):
name = row.get_children()[0].get_children()[0].get_children()[1].get_text()
desc = row.get_children()[0].get_tooltip_text()
if text.lower() in name.lower() or text.lower() in desc.lower():
return True
else:
return False
    def callback(self, widget, event, data, method, sniqtPrefix):
        """Switch toggled: install or remove the component named *data*.

        The core icon theme is special-cased (only switches GSettings);
        other components go through install()/remove().  ``self.error``
        suppresses a remove triggered by resetting a switch after a
        failed install.
        """
        if widget.get_active() == 1:
            if data == "core_icon_theme":
                self.toggleTheme("install")
            elif data not in installedComponents:
                self.install(data, method, sniqtPrefix)
        else:
            if data == "core_icon_theme":
                self.toggleTheme("remove")
            elif data in installedComponents and self.error == 0:
                self.remove(data, method, sniqtPrefix)
def install(self, appName, installMethod, sniqtPrefix):
patchedSniqt = settings.get_boolean("sniqt-patched")
if appName != "core_icon_theme" and appName != "telegram_desktop" and patchedSniqt is False and fromPPA is False:
print "Installing patched sni-qt"
self.notify('This may take a while', 'Please don\'t close the window', 'preferences-desktop')
if subprocess.call(['pkexec', scripts + "sni-qt.sh"]) == 0:
cache = apt.Cache()
version = cache |
# coding=utf-8
from ._commandbase import RadianceCommand
from ..datatype import RadiancePath
import os
class Epw2wea(RadianceCommand):
    """epw2wea transforms an EnergyPlus weather data (.epw) file into
    the DAYSIM weather file format, for use with the RADIANCE gendaymtx
    program.

    Attributes:
        epw_file: Filepath of the epw file that is to be converted into wea
            format.

    Usage:
        from honeybee_plus.radiance.command.epw2wea import Epw2wea.
        #create an epw2wea command.
        epwWea = Epw2wea(epw_fileName='c:/ladybug/test.epw')
    """
    # Class-level RadiancePath descriptors: they validate and normalise the
    # two file paths on assignment.
    _epw_file = RadiancePath('_epw_file',
                             descriptive_name='Epw weather data file',
                             relative_path=None, check_exists=False)
    output_wea_file = RadiancePath('output_wea_file',
                                   descriptive_name='Output wea file',
                                   relative_path=None, check_exists=False)
    def __init__(self, epw_file=None, output_wea_file=None):
        RadianceCommand.__init__(self)
        self.epw_file = epw_file
        """The path of the epw file that is to be converted to a wea file."""
        self.output_wea_file = output_wea_file
        """The path of the output wea file. Note that this path will be created
        if not specified by the user."""
    @property
    def epw_file(self):
        # Backed by the _epw_file RadiancePath descriptor.
        return self._epw_file
    @epw_file.setter
    def epw_file(self, value):
        """The path of the epw file that is to be converted to a wea file."""
        if value:
            self._epw_file = value
            # Default the output path to <epw basename>.wea when the user has
            # not set one; peeks at the descriptor's raw stored value.
            if not self.output_wea_file._value:
                self.output_wea_file = os.path.splitext(value)[0] + '.wea'
        else:
            self._epw_file = None
    def to_rad_string(self, relative_path=False):
        """Return full radiance command as string"""
        # Quoted binary path + input epw + output wea.
        rad_string = "%s %s %s" % (
            '"%s"' % os.path.join(self.radbin_path, 'epw2wea'),
            self.epw_file.to_rad_string(),
            self.output_wea_file.to_rad_string())
        # self.check_input_files(rad_string)
        return rad_string
    @property
    def input_files(self):
        """Return input files specified by user."""
        # Trailing comma makes this a 1-tuple.
        return self.epw_file.normpath,
|
', 'coins', 'friends', 'ignores', 'requests', 'inventories', 'mails', 'memberships',
'musicTracks', 'puffles', 'stamps', 'stampCovers', 'igloos']
class Coin(DBObject):
    # Row of the "coins" table; no behaviour beyond the DBObject defaults.
    pass
class Igloo(DBObject):
    """A penguin's igloo row, with its furniture and likes relations."""
    HASMANY = ['iglooFurnitures', 'iglooLikes']
    @inlineCallbacks
    def get_likes_count(self):
        # Sum of all like counts for this igloo (0 when there are none).
        # NOTE(review): the igloo id is interpolated straight into the SQL
        # string; safe only while self.id is always an integer -- prefer the
        # driver's parameter binding if execute() supports it.
        likes = yield Registry.getConfig().execute("SELECT COALESCE(SUM(likes), 0) FROM igloo_likes where "
                                                   "igloo_id = %s" % (self.id))
        returnValue(likes[0][0])
    @inlineCallbacks
    def get_furnitures(self):
        # All IglooFurniture rows attached to this igloo.
        furnitures = yield self.iglooFurnitures.get()
        returnValue(furnitures)
    @inlineCallbacks
    def get_furnitures_string(self):
        # Wire format: comma-separated "furn_id|x|y|rotate|frame" items.
        furnitures = yield self.get_furnitures()
        furn_data = map(lambda i: '|'.join(map(str, map(int, [i.furn_id, i.x, i.y, i.rotate, i.frame]))), furnitures)
        returnValue(','.join(furn_data))
    @inlineCallbacks
    def updateFurnitures(self, furnitures):
        # Replace all furniture rows with the given (furn_id, x, y, rotate,
        # frame) tuples.
        yield self.refresh()
        yield IglooFurniture.deleteAll(where=['igloo_id = ?', self.id])
        furn = [IglooFurniture(igloo_id=self.id, furn_id=x[0], x=x[1], y=x[2], rotate=x[3], frame=x[4])
                for x in furnitures]
        # NOTE(review): yield inside a list comprehension is Python-2-only
        # syntax (SyntaxError on Python 3.8+).
        [(yield i.save()) for i in furn]
        yield self.iglooFurnitures.set(furn)
# Simple twistar row classes; timestamps are exposed as Unix epoch seconds.
class IglooFurniture(DBObject):
    pass
class IglooLike(DBObject):
    def get_time(self):
        # Like timestamp (datetime column) as Unix epoch seconds.
        return int(time.mktime(self.time.timetuple()))
class Avatar(DBObject):
    pass
class Currency(DBObject):
    pass
class Ninja(DBObject):
    pass
class Asset(DBObject):
    def getPurchasedTimestamp(self):
        # Purchase time (datetime column) as Unix epoch seconds.
        return int(time.mktime(self.purchased.timetuple()))
class Ban(DBObject):
    """A ban row; `expire` is a datetime column marking when the ban ends."""
    def banned(self):
        """Return True while the ban has not yet expired."""
        # Bug fix: was `return hours > 0`, which raised NameError -- `hours`
        # is a method on self, not a name in this scope.
        return self.hours() > 0
    def hours(self):
        """Return the remaining ban duration in hours (0 once expired)."""
        expire = int(time.mktime(self.expire.timetuple()))
        hours = (expire - time.time()) / (60 * 60.0) if expire > time.time() else 0
        return hours
class CareItem(DBObject):
    pass
class Friend(DBObject):
    # Sentinel used before a real friend row id is assigned.
    friend_id = -1
class Ignore(DBObject):
    pass
class Request(DBObject):
    pass
class Inventory(DBObject):
    pass
class Mail(DBObject):
    def get_sent_on(self):
        # Sent time (datetime column) as Unix epoch seconds.
        return int(time.mktime(self.sent_on.timetuple()))
class Membership(DBObject):
    pass
class MusicTrack(DBObject):
    # Class-level default; True when the track is shared publicly.
    shared = False
    def __len__(self):
        # Track length comes straight from the `length` column.
        return self.length
    def __str__(self, withNotes = False):
        # Short form: "id|name|shared|likes"; long form adds notes and hash.
        # NOTE(review): str(track) can never pass withNotes=True -- the long
        # form is only reachable via an explicit __str__ call; confirm intent.
        if not withNotes:
            return '|'.join(map(str, [self.id, self.name, int(self.shared), self.likes]))
        return '%'.join(map(str, [self.id, self.name, int(self.shared), self.notes, self.hash, self.likes]))
    def __int__(self):
        return self.id
class Puffle(DBObject):
    """A penguin's puffle row; care stats decay over time via updatePuffleStats."""
    state = x = y = 0
    def __str__(self):
        # puffle id|type|sub_type|name|adoption|food|play|rest|clean|hat|x|y|is_walking
        return '|'.join(map(str, [int(self.id), int(self.type), self.subtype if int(self.subtype) != 0 else '',
                                  self.name, self.adopt(), int(self.food), int(self.play), int(self.rest),
                                  int(self.clean), int(self.hat), int(self.x), int(self.y), int(self.walking)]))
    def adopt(self):
        """Return the adoption time as Unix epoch seconds."""
        return int(time.mktime(self.adopted.timetuple()))
    def updatePuffleStats(self, engine):
        """Decay food/play/clean based on the time since the last care events.

        Backyard or walking puffles (and puffles with no care history) just
        get their care timestamps reset.  A nearly-starved puffle is moved to
        the backyard with full stats; a hungry one triggers a reminder
        postcard at most once per 12 hours.
        """
        care_history = json.loads(self.lastcare)
        now = time.time()
        if care_history is None or len(care_history) < 1 or bool(int(self.backyard)) or self.walking:
            # Bug fix: `lastcare` may hold JSON "null", in which case
            # json.loads returns None and subscripting it raised TypeError --
            # start a fresh history dict instead.
            if care_history is None:
                care_history = {}
            care_history['food'] = care_history['play'] = care_history['bath'] = now
            self.lastcare = json.dumps(care_history)
            self.save()
            return # ULTIMATE PUFFLE <indefinite health and energy>
        last_fed = care_history['food']
        last_played = care_history['play']
        last_bathed = care_history['bath']
        food, play, clean = int(self.food), int(self.play), int(self.clean)
        puffleCrumb = engine.puffleCrumbs[self.subtype]
        max_food, max_play, max_clean = puffleCrumb.hunger, 100, puffleCrumb.health
        self.rest = 100 # It's in the igloo all this time?
        self.save()
        ''' It afterall is a poor creature to be taken care of.
        if not int(puffle.id) in self.penguin.engine.puffleCrumbs.defautPuffles:
            return # They aren't to be taken care of
        '''
        '''
        if remaining % < 10 : send a postcard blaming (hungry, dirty, or unhappy)
        if remaining % < 2 : move puffle to pet store, delete puffle, send a postcard, sue 1000 coins as penalty
        '''
        fed_percent = food - 5 * ((now - last_fed)/86400) # delta_food = -5% per day
        play_percent = play - 5 * ((now - last_played)/86400) # delta_play = -5% per day
        clean_percent = clean - 10 * ((now - last_bathed)/86400) # delta_clean = -10% per day
        total_percent = (fed_percent + play_percent + clean_percent) / 3.0
        if fed_percent < 3 or total_percent < 6:
            # Too neglected: retire to the backyard with fully reset stats.
            self.backyard = 1
            self.food = 100
            self.play = 100
            self.clean = 100
            self.save()
            return
        if fed_percent < 10:
            pid = self.penguin_id
            pname = self.name
            def sendMail(mail):
                # Send a "hungry puffle" postcard (type 110), throttled to
                # one per 12 hours based on the most recent matching mail.
                if mail is not None:
                    sent = mail.sent_on
                    delta = (time.time() - sent)/3600/12
                    if delta < 1:
                        return
                Mail(penguin_id=pid, from_user=0, type=110, description=str(pname)).save()
            last_mail = Mail.find(where=['penguin_id = ? AND type = 110 AND description = ?', self.penguin_id, self.name], orderby='sent_on DESC', limit=1).addCallback(sendMail)
        self.food = fed_percent
        self.play = play_percent
        self.clean = clean_percent
        care_history['food'] = care_history['play'] = care_history['bath'] = now
        self.lastcare = json.dumps(care_history)
        self.save()
class Stamp(DBObject):
    def __int__(self):
        # The earned stamp's id lives in the `stamp` column.
        return int(self.stamp)
class StampCover(DBObject):
    pass
class EPFCom(DBObject):
    # Table name does not follow the default pluralisation.
    TABLENAME = 'epfcoms'
    def getTime(self):
        # Message time (datetime column) as Unix epoch seconds.
        return int(time.mktime(self.time.timetuple()))
    def __str__(self):
        # Wire format: "message|timestamp|mascot".
        return '|'.join(map(str, [self.message, self.getTime(), self.mascot]))
class PenguinDB(object):
"""
<Server.Penguin> will extend this to get db operations
Syntax:
def db_<FunctionName> (*a, **kwa): << must be deferred and mustreturn a defer
> recommended to use with inlineCallbacks
"""
    def __init__(self):
        # Shared timeline-server logger.
        self.logger = logging.getLogger(TIMELINE_LOGGER)
        # Cached twistar row for this penguin; populated lazily by db_init().
        self.dbpenguin = None
    @inlineCallbacks
    def db_init(self):
        """Lazily load this penguin's DB row, matching by ID, then swid,
        then username (first non-None identifier wins).

        Raises when no matching row exists; returns (via Deferred) True.
        """
        if self.dbpenguin is None:
            column, value = 'username', self.penguin.username
            if not self.penguin.id is None:
                column, value = 'ID', self.penguin.id
            elif not self.penguin.swid is None:
                column, value = 'swid', self.penguin.swid
            self.dbpenguin = yield Penguin.find(where = ['%s = ?' % column, value], limit = 1)
            if self.dbpenguin is None:
                raise Exception("[TE201] Penguin not found with {1} - {0}".format(value, column))
        returnValue(True)
@inlineCallbacks
def db_nicknameUpdate(self, nick):
p_nickname = self.dbpenguin.nickname
self.dbpenguin.nickname = nick
done = self.dbpenguin.save()
if len(done.errors) > 0:
self.dbpenguin.nickname = p_nickname
for error in done.errors:
self.log('error', "[TE200] MySQL update nickname failed. Error :", error)
returnValue(False)
else:
returnValue(True)
    @inlineCallbacks
    def db_penguinExists(self, criteria = 'ID', value = None):
        """Return (via Deferred) True when a penguin row has `criteria` == `value`."""
        exists = yield Penguin.exists(["`%s` = ?" % criteria, value])
        returnValue(exists)
@inlineCallbacks
def db_getPenguin(self, criteria, *values):
|
from abc import ABCMeta, abstractmethod
import json
import os
try:
import redis
except ImportError:
pass
from .config import Config
def get_cache(cache, config=None):
    """Resolve *cache* into a cache backend instance.

    The strings 'JsonCache' and 'RedisCache' act as factory names (a
    RedisCache receives a default Config when none is supplied); any other
    string raises.  A non-string is assumed to already be a cache object
    and is returned unchanged.
    """
    if not isinstance(cache, str):
        return cache
    if cache == 'JsonCache':
        return JsonCache()
    if cache == 'RedisCache':
        if config is None:
            config = Config()
        return RedisCache(config)
    raise Exception('Invalid string cache option specified.')
class Cache(object):
    """Abstract interface every cache backend must implement."""
    # NOTE(review): Python-2-style metaclass declaration; under Python 3 this
    # attribute is ignored (abstractmethod is then not enforced) -- confirm
    # the targeted Python version.
    __metaclass__ = ABCMeta
    @abstractmethod
    def get(self, key):
        """Return the stored value for *key*, or None when absent."""
        pass
    @abstractmethod
    def set(self, key, value):
        """Store *key* -> *value*; return True on success."""
        pass
    @abstractmethod
    def delete(self, key):
        """Remove *key*; return True when it existed, False otherwise."""
        pass
class JsonCache(Cache):
    """File-backed cache keeping all entries in one JSON document."""
    def __init__(self, filename='cache.json', root_directory=None):
        """Locate (and if needed seed) the backing JSON file.

        :param filename: name of the JSON file.
        :param root_directory: directory holding the file; defaults to the CWD.
        """
        # os.path.join instead of '{}/{}'.format: portable path construction.
        base = root_directory if root_directory is not None else os.getcwd()
        self.data_location = os.path.join(base, filename)
        # if the file doesn't exist create a empty file with a json object
        if not os.path.isfile(self.data_location):
            with open(self.data_location, 'w+') as data_file:
                data_file.write('{}')
    def get(self, key):
        """Return the stored value for *key*, or None when absent."""
        with open(self.data_location) as data_file:
            data = json.load(data_file)
        return data.get(key)
    def set(self, key, value):
        """Store *key* -> *value* and rewrite the file; returns True."""
        with open(self.data_location, 'r+') as data_file:
            data = json.load(data_file)
            data[key] = value
            # Rewrite in place: seek to start, dump, then drop any leftover tail.
            data_file.seek(0)
            data_file.write(json.dumps(data))
            data_file.truncate()
        return True
    def delete(self, key):
        """Remove *key*; returns True when it existed, False otherwise."""
        with open(self.data_location, 'r+') as data_file:
            data = json.load(data_file)
            if key not in data:
                return False
            data.pop(key, None)
            data_file.seek(0)
            data_file.write(json.dumps(data))
            data_file.truncate()
        return True
class RedisCache(Cache):
    """Cache backed by a Redis server; the connection is created lazily."""
    # currently loading is only from config file
    def __init__(self, config):
        self.redis_uri = config.get_config(None, 'URI', root='redis')
        self.redis = None  # connected on first use by _connect()
        self.redis_config = config.get_section_config('redis')
    def get(self, key):
        """Return the UTF-8-decoded value for *key*, or None when absent."""
        self._connect()
        value = self.redis.get(key)
        if value is not None:
            value = value.decode('UTF-8')
        return value
    def set(self, key, value):
        """Store *key* -> *value*; return True on success."""
        self._connect()
        result = self.redis.set(key, value)
        # redis-py returns True on success (or None when a condition fails);
        # bool() avoids the `None > 0` TypeError under Python 3.
        return bool(result)
    def delete(self, key):
        """Delete *key*; return True when at least one key was removed."""
        self._connect()
        result = self.redis.delete(key)
        return bool(result)
    def _connect(self):
        """Create the client on first use and push config values best-effort."""
        if self.redis is None:
            self.redis = redis.StrictRedis.from_url(self.redis_uri)
            for name, value in self.redis_config.items():
                try:
                    self.redis.config_set(name, value)
                except Exception:
                    # Narrowed from a bare "except:"; some servers reject
                    # CONFIG SET, which we treat as non-fatal.
                    pass
                    # log...
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2017-02-17 10:06
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the `decomposition` app.

    Creates the Decomposition/feature/motif tables, then adds the foreign
    keys in separate AddField operations to break dependency cycles.
    """
    initial = True
    dependencies = [
        ('basicviz', '0049_auto_20170216_2228'),
    ]
    operations = [
        migrations.CreateModel(
            name='Decomposition',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128, unique=True)),
                ('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Experiment')),
            ],
        ),
        migrations.CreateModel(
            name='DecompositionFeatureInstance',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('intensity', models.FloatField()),
                ('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Document')),
            ],
        ),
        migrations.CreateModel(
            name='DocumentGlobalFeature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('intensity', models.FloatField()),
                ('document', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Document')),
            ],
        ),
        migrations.CreateModel(
            name='FeatureMap',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
            ],
        ),
        migrations.CreateModel(
            name='FeatureSet',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('description', models.CharField(max_length=1024, null=True)),
            ],
        ),
        migrations.CreateModel(
            name='GlobalFeature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=128)),
                ('min_mz', models.FloatField()),
                ('max_mz', models.FloatField()),
                ('featureset', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.FeatureSet')),
            ],
        ),
        migrations.CreateModel(
            name='GlobalMotif',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('originalmotif', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Mass2Motif')),
            ],
        ),
        migrations.CreateModel(
            name='GlobalMotifGlobalFeature',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('probability', models.FloatField()),
                ('feature', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature')),
                ('motif', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalMotif')),
            ],
        ),
        migrations.AddField(
            model_name='featuremap',
            name='globalfeature',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature'),
        ),
        migrations.AddField(
            model_name='featuremap',
            name='localfeature',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='basicviz.Feature'),
        ),
        migrations.AddField(
            model_name='documentglobalfeature',
            name='feature',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature'),
        ),
        migrations.AddField(
            model_name='decompositionfeatureinstance',
            name='feature',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='decomposition.GlobalFeature'),
        ),
    ]
|
# -*- coding: utf-8 -*-
import django
from django.contrib. | auth.models import User
from django.test import TestCase, Client
# compat thing!
if django.VERSION[:2] < (1, 10):
from django.core.urlresolvers import reverse
else:
from django.urls import reverse
class FilerUtilsTests(TestCase):
    """Smoke tests for the filer admin folder changelist."""
    def setUp(self):
        # A superuser is required to access the admin changelist.
        self.client = Client()
        self.user = User.objects.create_superuser(
            username='fred', password='test', email='test@test.fred',
        )
    def tearDown(self):
        pass
    def test_has_css(self):
        # Logging in as the superuser and following redirects should land on
        # the folder changelist with a 200.
        self.client.login(username='fred', password='test')
        changelist_url = reverse('admin:filer_folder_changelist')
        response = self.client.get(changelist_url, follow=True)
        self.assertEqual(response.status_code, 200)
|
-----------------------------------------------------------------
# The MIT License (MIT)
#
# Copyright (c) 2016 Jonathan Labéjof <jonathan.labejof@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# --------------------------------------------------------------------
"""Base schema package."""
from b3j0f.utils.version import OrderedDict
from inspect import getmembers
from six import iteritems
from uuid import uuid4
__all__ = ['Schema', 'DynamicValue']
class DynamicValue(object):
    """Wrap a callable whose result is resolved lazily while cleaning a
    schema.

    For example, the schema attribute ``uuid`` uses a DynamicValue so a
    fresh default is generated per instanciation.
    """
    __slots__ = ['func']
    def __init__(self, func, *args, **kwargs):
        """:param func: callable invoked each time the value is resolved."""
        super(DynamicValue, self).__init__(*args, **kwargs)
        self.func = func
    def __call__(self):
        """Resolve and return the wrapped value."""
        resolver = self.func
        return resolver()
class Schema(property):
"""Schema description.
A schema is identified by a string such as an universal unique identifier,
and optionnally a name.
Any setted value respect those conditions in this order:
1. if the value is a lambda expression, the value equals its execution.
2. the value is validated with this method `validate`.
3. the value is given to a custom setter (`fget` constructor parameter) if
given or setted to this attribute `_value`.
Once you defined your schema inheriting from this class, your schema will
be automatically registered in the registry and becomes accessible from the
`b3j0f.schema.reg.getschemabyuid` function.
"""
name = '' #: schema name. Default is self name.
#: schema universal unique identifier.
uuid = DynamicValue(lambda: str(uuid4()))
doc = '' #: schema description.
default = None #: schema default value.
required = [] #: required schema names.
version = '1' #: schema version.
nullable = True #: if True (default), value can be None.
    def __init__(
        self, fget=None, fset=None, fdel=None, doc=None, **kwargs
    ):
        """Instance attributes are setted related to arguments or inner schemas.

        :param fget: custom getter called before the stored value is read.
        :param fset: custom setter called instead of storing the value.
        :param fdel: custom deleter called instead of deleting the attribute.
        :param doc: property documentation; also forwarded as the ``doc``
            inner-schema value.
        :param default: default value. If lambda, called at initialization.
        """
        # Register our own dispatchers with the property machinery; they
        # delegate to _fget_/_fset_/_fdel_ below.
        super(Schema, self).__init__(
            fget=self._getter, fset=self._setter, fdel=self._deleter,
            doc=doc
        )
        # set custom getter/setter/deleter (only overwrite an inherited one
        # when an explicit callable is given)
        if fget or not hasattr(self, '_fget_'):
            self._fget_ = fget
        if fset or not hasattr(self, '_fset_'):
            self._fset_ = fset
        if fdel or not hasattr(self, '_fdel_'):
            self._fdel_ = fdel
        if doc is not None:
            kwargs['doc'] = doc
        cls = type(self)
        # set inner schema values: every public class member (except the
        # property API names) becomes a per-instance value, taken from kwargs
        # when supplied, with DynamicValue/Schema members resolved to their
        # concrete/default values.
        for name, member in getmembers(cls):
            if name[0] != '_' and name not in [
                'fget', 'fset', 'fdel', 'setter', 'getter', 'deleter',
                'default'
            ]:
                if name in kwargs:
                    val = kwargs[name]
                else:
                    val = member
                if isinstance(val, DynamicValue):
                    val = val()
                if isinstance(val, Schema):
                    val = val.default
                # a Schema default may itself be dynamic: resolve again
                if isinstance(val, DynamicValue):
                    val = val()
                setattr(self, self._attrname(name=name), val)
                if member != val:
                    setattr(self, name, val)
        # `default` is handled last so inner schemas are already in place.
        default = kwargs.get('default', self.default)
        self._default_ = default
        if default is not None:
            self.default = default
    def _attrname(self, name=None):
        """Get attribute name to set in order to keep the schema value.

        :param str name: attribute name. Default is this name or uuid.
        :return: private storage name of the form ``_<name>_``.
        :rtype: str
        """
        return '_{0}_'.format(name or self._name_ or self._uuid_)
    def __repr__(self):
        # e.g. "IntegerSchema(<uuid>/<name>)".
        return '{0}({1}/{2})'.format(type(self).__name__, self.uuid, self.name)
    def __hash__(self):
        # Schemas hash by uuid, matching their identity semantics.
        return hash(self.uuid)
    def _getter(self, obj):
        """Called when the parent element tries to get this property value.

        Resolution order: custom getter result, then the stored private
        attribute, then the schema default.

        :param obj: parent object.
        """
        result = None
        if self._fget_ is not None:
            result = self._fget_(obj)
        if result is None:
            result = getattr(obj, self._attrname(), self._default_)
        # notify parent schema about returned value
        if isinstance(obj, Schema):
            obj._getvalue(self, result)
        return result
    def _getvalue(self, schema, value):
        """Hook fired when an inner schema returns a value; no-op by default.

        :param Schema schema: inner schema.
        :param value: returned value.
        """
    def _setter(self, obj, value):
        """Called when the parent element tries to set this property value.

        :param obj: parent object.
        :param value: new value to use. If lambda, updated with the lambda
            result.
        """
        if isinstance(value, DynamicValue):  # execute lambda values.
            fvalue = value()
        else:
            fvalue = value
        # Validate the resolved value before storing anything.
        self._validate(data=fvalue, owner=obj)
        if self._fset_ is not None:
            self._fset_(obj, fvalue)
        else:
            # Note: the *unresolved* value is stored, so dynamic values are
            # re-resolved on later reads through _getter.
            setattr(obj, self._attrname(), value)
        # notify obj about the new value.
        if isinstance(obj, Schema):
            obj._setvalue(self, fvalue)
    def _setvalue(self, schema, value):
        """Hook fired when an inner schema changes value; no-op by default.

        :param Schema schema: inner schema.
        :param value: new value.
        """
    def _deleter(self, obj):
        """Called when the parent element tries to delete this property value.

        :param obj: parent object.
        """
        if self._fdel_ is not None:
            self._fdel_(obj)
        else:
            delattr(obj, self._attrname())
        # notify parent schema about value deletion.
        if isinstance(obj, Schema):
            obj._delvalue(self)
    def _delvalue(self, schema):
        """Hook fired when an inner schema deletes its value; no-op by default.

        :param Schema schema: inner schema.
        """
def _validate(self, data, owner=None):
"""Validate input data in returning an empty list if true.
:param data: data to validate with this schema.
:param Schema owner: schema owner.
:raises: Exception if the data is not validated.
"""
if isinstance(data, DynamicValue):
data = data()
if data is None and not self.nullable:
raise ValueError('Value can not be null')
elif data is not None:
isdict = isinstance(data, dict)
for name, schema in iteritems(self.getschemas()):
if name == 'default':
continue
if name in self.required:
| if (
| (isdict and name not in data) or
|
import os
from ..helper import freq_to_mel
from ..praat import PraatAnalysisFunction
class PraatMfccFunction(PraatAnalysisFunction):
    """Praat analysis function computing MFCCs via the bundled mfcc.praat script."""
    def __init__(self, praat_path=None, window_length=0.025, time_step=0.01, max_frequency=7800,
                 num_coefficients=13):
        """Configure the MFCC script and its argument list.

        The maximum frequency is converted from Hz to mel before being
        handed to Praat.
        """
        here = os.path.dirname(os.path.abspath(__file__))
        script_path = os.path.join(here, 'mfcc.praat')
        script_args = [
            num_coefficients,
            window_length,
            time_step,
            freq_to_mel(max_frequency),
        ]
        super(PraatMfccFunction, self).__init__(
            script_path, praat_path=praat_path, arguments=script_args)
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class OperationListResult(Model):
    """The list of available operations for Data Lake Store.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar value: the results of the list operation.
    :vartype value: list[~azure.mgmt.datalake.store.models.Operation]
    :ivar next_link: the link (url) to the next page of results.
    :vartype next_link: str
    """
    # AutoRest-generated: both fields are read-only (server-populated).
    _validation = {
        'value': {'readonly': True},
        'next_link': {'readonly': True},
    }
    # Maps Python attribute names to wire (JSON) keys and msrest types.
    _attribute_map = {
        'value': {'key': 'value', 'type': '[Operation]'},
        'next_link': {'key': 'nextLink', 'type': 'str'},
    }
    def __init__(self):
        super(OperationListResult, self).__init__()
        self.value = None
        self.next_link = None
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South migration creating the RssFeed and RssEntry tables."""
    def forwards(self, orm):
        # Adding model 'RssFeed'
        db.create_table(u'rsssync_rssfeed', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=256)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ))
        db.send_create_signal(u'rsssync', ['RssFeed'])
        # Adding model 'RssEntry'
        db.create_table(u'rsssync_rssentry', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('title', self.gf('django.db.models.fields.CharField')(max_length=500)),
            ('summary', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('link', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('date', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
            ('feed', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['rsssync.RssFeed'])),
        ))
        db.send_create_signal(u'rsssync', ['RssEntry'])
    def backwards(self, orm):
        # Deleting model 'RssFeed'
        db.delete_table(u'rsssync_rssfeed')
        # Deleting model 'RssEntry'
        db.delete_table(u'rsssync_rssentry')
    # Frozen ORM snapshot used by South while running this migration.
    models = {
        u'rsssync.rssentry': {
            'Meta': {'object_name': 'RssEntry'},
            'date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['rsssync.RssFeed']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'summary': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '500'})
        },
        u'rsssync.rssfeed': {
            'Meta': {'object_name': 'RssFeed'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200'})
        }
    }
    complete_apps = ['rsssync']
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import io
import re
from os.path import join, dirname
from setuptools import setup, find_packages
# Markdown constructs recognised by md2pypi() below.
RE_MD_CODE_BLOCK = re.compile(r'```(?P<language>\w+)?\n(?P<lines>.*?)```', re.S)
RE_SELF_LINK = re.compile(r'\[(.*?)\]\[\]')
RE_LINK_TO_URL = re.compile(r'\[(?P<text>.*?)\]\((?P<url>.*?)\)')
RE_LINK_TO_REF = re.compile(r'\[(?P<text>.*?)\]\[(?P<ref>.*?)\]')
RE_LINK_REF = re.compile(r'^\[(?P<key>[^!].*?)\]:\s*(?P<url>.*)$', re.M)
RE_BADGE = re.compile(r'^\[\!\[(?P<text>.*?)\]\[(?P<badge>.*?)\]\]\[(?P<target>.*?)\]$', re.M)
RE_TITLE = re.compile(r'^(?P<level>#+)\s*(?P<title>.*)$', re.M)
BADGES_TO_KEEP = []
RST_TITLE_LEVELS = ['=', '-', '*']
RST_BADGE = '''\
.. image:: {badge}
    :target: {target}
    :alt: {text}
'''
def md2pypi(filename):
    '''
    Load .md (markdown) file and sanitize it for PyPI.
    Remove unsupported github tags:
      - code-block directive
      - travis ci build badges
    '''
    content = io.open(filename).read()
    # Fenced code blocks -> reST code-block directives.
    for match in RE_MD_CODE_BLOCK.finditer(content):
        rst_block = '\n'.join(
            ['.. code-block:: {language}'.format(**match.groupdict()), ''] +
            ['    {0}'.format(l) for l in match.group('lines').split('\n')] +
            ['']
        )
        content = content.replace(match.group(0), rst_block)
    refs = dict(RE_LINK_REF.findall(content))
    # Bug fix: the replacement templates are now raw strings so the
    # '\g<...>' backreferences reach re.sub unmangled -- in a non-raw
    # string '\g' is an invalid escape sequence (SyntaxWarning since
    # Python 3.12, scheduled to become an error).
    content = RE_LINK_REF.sub(r'.. _\g<key>: \g<url>', content)
    content = RE_SELF_LINK.sub(r'`\g<1>`_', content)
    content = RE_LINK_TO_URL.sub(r'`\g<text> <\g<url>>`_', content)
    # Badges: drop them unless whitelisted, in which case resolve their refs.
    for match in RE_BADGE.finditer(content):
        if match.group('badge') not in BADGES_TO_KEEP:
            content = content.replace(match.group(0), '')
        else:
            params = match.groupdict()
            params['badge'] = refs[match.group('badge')]
            params['target'] = refs[match.group('target')]
            content = content.replace(match.group(0),
                                      RST_BADGE.format(**params))
    # Must occur after badges
    for match in RE_LINK_TO_REF.finditer(content):
        content = content.replace(match.group(0), '`{text} <{url}>`_'.format(
            text=match.group('text'),
            url=refs[match.group('ref')]
        ))
    # Markdown "#"-titles -> underlined reST titles.
    for match in RE_TITLE.finditer(content):
        underchar = RST_TITLE_LEVELS[len(match.group('level')) - 1]
        title = match.group('title')
        underline = underchar * len(title)
        full_title = '\n'.join((title, underline))
        content = content.replace(match.group(0), full_title)
    return content
# PyPI long description: README plus changelog, both converted from markdown
# to reST; trailing '' ensures the text ends with a newline.
long_description = '\n'.join((
    md2pypi('README.md'),
    md2pypi('CHANGELOG.md'),
    ''
))
# Package metadata; the duplicate 'Programming Language :: Python'
# classifier has been removed (it was listed twice).
setup(
    name='my-theme',
    version='0.1.0',
    description='My awesome theme',
    long_description=long_description,
    url='https://theme.opendata.team',
    author='OpenDataTeam',
    author_email='me@somewhere.com',
    packages=['my_theme'],
    include_package_data=True,
    install_requires=[],
    entry_points={
        # Register the theme with udata through its themes entry point.
        'udata.themes': [
            'awesome-theme = my_theme'
        ]
    },
    license='AGPL',
    zip_safe=False,
    keywords='udata, theme, My Theme',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Programming Language :: Python',
        'Environment :: Web Environment',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'Topic :: System :: Software Distribution',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
|
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import unittest
from openerp.tools.translate import quote, unquote, xml_translate
class TranslationToolsTestCase(unittest.TestCase):
    """Tests for translation helpers: quote()/unquote() round-trips and
    term extraction / reassembly by xml_translate().

    Uses assertEqual instead of the deprecated assertEquals alias.
    """

    def test_quote_unquote(self):

        def test_string(text):
            # quote() may split long content over PO-style continuation
            # lines; joining them back and unquoting must round-trip.
            quoted = quote(text)
            unquoted = unquote("".join(quoted.split('"\n"')))
            self.assertEqual(text, unquoted)

        test_string("""test \nall kinds\n \n o\r
                    \\\\ nope\n\n"
                 """)

        # The ones with 1+ backslashes directly followed by
        # a newline or literal N can fail... we would need a
        # state-machine parser to handle these, but this would
        # be much slower so it's better to avoid them at the moment
        self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
                    \\\\nope\n\n"
                 """)

    def test_translate_xml_base(self):
        """ Test xml_translate() without formatting elements. """
        terms = []
        source = """<form string="Form stuff">
                        <h1>Blah blah blah</h1>
                        Put some more text here
                        <field name="foo"/>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah blah blah', 'Put some more text here'])

    def test_translate_xml_inline1(self):
        """ Test xml_translate() with formatting elements. """
        terms = []
        source = """<form string="Form stuff">
                        <h1>Blah <i>blah</i> blah</h1>
                        Put some <b>more text</b> here
                        <field name="foo"/>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah <i>blah</i> blah', 'Put some <b>more text</b> here'])

    def test_translate_xml_inline2(self):
        """ Test xml_translate() with formatting elements embedding other elements. """
        terms = []
        source = """<form string="Form stuff">
                        <b><h1>Blah <i>blah</i> blah</h1></b>
                        Put <em>some <b>more text</b></em> here
                        <field name="foo"/>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah <i>blah</i> blah', 'Put <em>some <b>more text</b></em> here'])

    def test_translate_xml_inline3(self):
        """ Test xml_translate() with formatting elements without actual text. """
        terms = []
        source = """<form string="Form stuff">
                        <div>
                            <span class="before"/>
                            <h1>Blah blah blah</h1>
                            <span class="after">
                                <i class="hack"/>
                            </span>
                        </div>
                    </form>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['Form stuff', 'Blah blah blah'])

    def test_translate_xml_t(self):
        """ Test xml_translate() with t-* attributes. """
        terms = []
        source = """<t t-name="stuff">
                        stuff before
                        <span t-field="o.name"/>
                        stuff after
                    </t>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['stuff before', 'stuff after'])

    def test_translate_xml_off(self):
        """ Test xml_translate() with attribute translate="off". """
        terms = []
        source = """<div>
                        stuff before
                        <div translation="off">Do not translate this</div>
                        stuff after
                    </div>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['stuff before', 'stuff after'])

    def test_translate_xml_attribute(self):
        """ Test xml_translate() with <attribute> elements. """
        terms = []
        source = """<field name="foo" position="attributes">
                        <attribute name="string">Translate this</attribute>
                        <attribute name="option">Do not translate this</attribute>
                    </field>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['Translate this'])

    def test_translate_xml_a(self):
        """ Test xml_translate() with <a> elements. """
        terms = []
        source = """<t t-name="stuff">
                        <ul class="nav navbar-nav">
                            <li>
                                <a class="oe_menu_leaf" href="/web#menu_id=42&amp;action=54">
                                    <span class="oe_menu_text">Blah</span>
                                </a>
                            </li>
                            <li class="dropdown" id="menu_more_container" style="display: none;">
                                <a class="dropdown-toggle" data-toggle="dropdown" href="#">More <b class="caret"/></a>
                                <ul class="dropdown-menu" id="menu_more"/>
                            </li>
                        </ul>
                    </t>"""
        result = xml_translate(terms.append, source)
        self.assertEqual(result, source)
        self.assertItemsEqual(terms,
            ['<span class="oe_menu_text">Blah</span>', 'More <b class="caret"/>'])
|
DeHaan <michael.dehaan@gmail.com>
# (c) 2015 Toshio Kuratomi <tkuratomi@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
# from python and deps
from six.moves import StringIO
import json
import os
import shlex
# from Ansible
from ansible import __version__
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.utils.jsonify import jsonify
from ansible.utils.unicode import to_bytes
REPLACER = "#<<INCLUDE_ANSIBLE_MODULE_COMMON>>"
REPLACER_ARGS = "\"<<INCLUDE_ANSIBLE_MODULE_ARGS>>\""
REPLACER_COMPLEX = "\"<<INCLUDE_ANSIBLE_MODULE_COMPLEX_ARGS>>\""
REPLACER_WINDOWS = "# POWERSHELL_COMMON"
REPLACER_WINARGS = "<<INCLUDE_ANSIBLE_MODULE_WINDOWS_ARGS>>"
REPLACER_JSONARGS = "<<INCLUDE_ANSIBLE_MODULE_JSON_ARGS>>"
REPLACER_VERSION = "\"<<ANSIBLE_VERSION>>\""
# We could end up writing out parameters with unicode characters so we need to
# specify an encoding for the python source file
ENCODING_STRING = '# -*- co | ding: utf-8 -*-'
# we've moved the module_common relative to the snippets, so fix the path
_SNIPPET_PATH = os.path.join(os.path.dirname(__file__), '..', 'module_utils')
# *************************************** | ***************************************
def _slurp(path):
if not os.path.exists(path):
raise AnsibleError("imported module support code does not exist at %s" % path)
fd = open(path)
data = fd.read()
fd.close()
return data
def _find_snippet_imports(module_data, module_path, strip_comments):
    """
    Given the source of the module, convert it to a Jinja2 template to insert
    module code and return whether it's a new or old style module.

    Returns (transformed source, module_style) where module_style is one of
    'old', 'new' or 'non_native_want_json'.
    """
    # Detect the module style from markers present anywhere in the source.
    module_style = 'old'
    if REPLACER in module_data:
        module_style = 'new'
    elif REPLACER_WINDOWS in module_data:
        module_style = 'new'
    elif REPLACER_JSONARGS in module_data:
        module_style = 'new'
    elif 'from ansible.module_utils.' in module_data:
        module_style = 'new'
    elif 'WANT_JSON' in module_data:
        module_style = 'non_native_want_json'

    output = StringIO()
    lines = module_data.split('\n')
    snippet_names = []
    for line in lines:
        if REPLACER in line:
            output.write(_slurp(os.path.join(_SNIPPET_PATH, "basic.py")))
            snippet_names.append('basic')
        if REPLACER_WINDOWS in line:
            ps_data = _slurp(os.path.join(_SNIPPET_PATH, "powershell.ps1"))
            output.write(ps_data)
            snippet_names.append('powershell')
        elif line.startswith('from ansible.module_utils.'):
            tokens = line.split(".")
            import_error = False
            if len(tokens) != 3:
                import_error = True
            if " import *" not in line:
                import_error = True
            if import_error:
                raise AnsibleError("error importing module in %s, expecting format like 'from ansible.module_utils.basic import *'" % module_path)
            snippet_name = tokens[2].split()[0]
            snippet_names.append(snippet_name)
            output.write(_slurp(os.path.join(_SNIPPET_PATH, snippet_name + ".py")))
        else:
            # BUGFIX: this used to be `if ...: pass` followed by an
            # unconditional write, so strip_comments never stripped anything.
            # Comments and blank lines are now skipped only when requested.
            # NOTE(review): this also drops shebang/encoding lines (see the
            # TODO in modify_module) -- acceptable because strip_comments
            # defaults to False; confirm before enabling it by default.
            if strip_comments and (line.startswith("#") or line == ''):
                continue
            output.write(line)
            output.write("\n")

    if not module_path.endswith(".ps1"):
        # Unixy modules must pull in module_utils/basic.
        if len(snippet_names) > 0 and 'basic' not in snippet_names:
            raise AnsibleError("missing required import in %s: from ansible.module_utils.basic import *" % module_path)
    else:
        # Windows modules must include the POWERSHELL_COMMON marker.
        if len(snippet_names) > 0 and 'powershell' not in snippet_names:
            raise AnsibleError("missing required import in %s: # POWERSHELL_COMMON" % module_path)

    return (output.getvalue(), module_style)
# ******************************************************************************
def modify_module(module_path, module_args, task_vars=dict(), strip_comments=False):
"""
Used to insert chunks of code into modules before transfer rather than
doing regular python imports. This allows for more efficient transfer in
a non-bootstrapping scenario by not moving extra files over the wire and
also takes care of embedding arguments in the transferred modules.
This version is done in such a way that local imports can still be
used in the module code, so IDEs don't have to be aware of what is going on.
Example:
from ansible.module_utils.basic import *
... will result in the insertion of basic.py into the module
from the module_utils/ directory in the source tree.
All modules are required to import at least basic, though there will also
be other snippets.
For powershell, there's equivalent conventions like this:
# POWERSHELL_COMMON
which results in the inclusion of the common code from powershell.ps1
"""
### TODO: Optimization ideas if this code is actually a source of slowness:
# * Fix comment stripping: Currently doesn't preserve shebangs and encoding info (but we unconditionally add encoding info)
# * Use pyminifier if installed
# * comment stripping/pyminifier needs to have config setting to turn it
# off for debugging purposes (goes along with keep remote but should be
# separate otherwise users wouldn't be able to get info on what the
# minifier output)
# * Only split into lines and recombine into strings once
# * Cache the modified module? If only the args are different and we do
# that as the last step we could cache sll the work up to that point.
with open(module_path) as f:
# read in the module source
module_data = f.read()
(module_data, module_style) = _find_snippet_imports(module_data, module_path, strip_comments)
module_args_json = json.dumps(module_args).encode('utf-8')
python_repred_args = repr(module_args_json)
# these strings should be part of the 'basic' snippet which is required to be included
module_data = module_data.replace(REPLACER_VERSION, repr(__version__))
module_data = module_data.replace(REPLACER_COMPLEX, python_repred_args)
module_data = module_data.replace(REPLACER_WINARGS, module_args_json)
module_data = module_data.replace(REPLACER_JSONARGS, module_args_json)
if module_style == 'new':
facility = C.DEFAULT_SYSLOG_FACILITY
if 'ansible_syslog_facility' in task_vars:
facility = task_vars['ansible_syslog_facility']
module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
lines = module_data.split(b"\n", 1)
shebang = None
if lines[0].startswith(b"#!"):
shebang = lines[0].strip()
args = shlex.split(str(shebang[2:]))
interpreter = args[0]
interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
if interpreter_config in task_vars:
interpreter = to_bytes(task_vars[interpreter_config], errors='strict')
lines[0] = shebang = b"#!{0} {1}".format(interpreter, b" ".join(args[1:]))
if os.path.basename(interpreter).startswith('python'):
lines.insert(1, ENCODING_STRING)
else:
# No shebang, assume a binary module?
pass
module_data = b"\n".join(lines)
|
ject = None
self.inject = INJECT_READY
def addControlCard(self, cards):
cardDicts = []
for cardMain in cards:
cardCtrl = sim_card.SimCard(mode=cardMain.mode, type=self.simType)
if cardMain.mode == sim_reader.MODE_SIM_SOFT:
#TODO: try to remove
cardCtrl.simReader = cardMain.simReader
#TODO: reimplement to not copy all parameter
cardCtrl.index = cardMain.index
cardCtrl.atr = cardMain.atr
#cardCtrl.swNoError = cardMain.swNoError
cardCtrl.type = cardMain.type
cardCtrl.logicalChannelClosed = cardMain.logicalChannelClosed
# Do not apply ins and file forwarding rules on control interface.
cardCtrl.removeRoutingAttr()
cardDict = {MAIN_INTERFACE : cardMain, CTRL_INTERFACE : cardCtrl}
cardDicts.append(cardDict)
return cardDicts
def usbCtrlOut(self, req, buf):
if self.mode == SIMTRACE_OFFLINE:
return []
return self.dev.ctrl_transfer(0x40,
bRequest=req, # R-APDU
data_or_wLength=buf,
timeout=500)
    def usbCtrlIn(self, req):
        """Issue a vendor IN control transfer and return the bytes read."""
        # bmRequestType 0xC0: device-to-host, vendor request; read up to 512B.
        # NOTE(review): timeout=512 looks like a copy of the 512-byte read
        # length -- usbCtrlOut uses timeout=500 -- confirm the intended value.
        return self.dev.ctrl_transfer(0xC0,
                             bRequest=req,
                             data_or_wLength=512,
                             timeout=512)
def receiveData(self, cmd):
if self.mode == SIMTRACE_OFFLINE:
return []
try:
return self.usbCtrlIn(cmd)
except:
time.sleep(0.2)
return self.usbCtrlIn(cmd)
    def sendData(self, msg):
        """Send a response APDU (R-APDU) to the device."""
        return self.usbCtrlOut(CMD_R_APDU, msg)
def resetCards(self, soft=True):
if soft:
resetThread = ResetThread(self)
resetThread.setDaemon(True)
# Start handling C-APDUs.
resetThread.start()
else:
for cardDict in self.cardsDict:
cardDict[MAIN_INTERFACE].reset()
    def receiveCommandApdu(self):
        """Poll for one device event.

        Returns (event, data): data is the C-APDU payload for EVT_C_APDU
        events, otherwise None.  Returns (None, None) when nothing (or an
        EVT_UNKNOWN event) was received.
        """
        msg = []
        # FIXME: This is the main event loop. Move it to top level.
        msg = list(self.receiveData(CMD_POLL))
        if not len(msg):
            return None, None
        data = None
        # First byte identifies the event type; C-APDU payload starts at
        # offset 4.
        evt = msg[0]
        if evt == EVT_C_APDU:
            data = msg[4:]
        elif evt == EVT_RESET:
            pass
        elif evt == EVT_UNKNOWN:
            return None, None
        else:
            self.loggingApdu.info("unknown event: %s\n" % hextools.bytes2hex(msg))
        return (evt, data)
    def sendResponseApdu(self, msg):
        """Forward a response APDU to the device (thin wrapper on sendData)."""
        self.sendData(msg)
def command(self, tag, payload=[]): # dummy byte
self.loggingApdu.debug("CMD %d %s" % (tag, hextools.bytes2hex(payload)))
self.usbCtrlOut(tag, payload)
def aidCommon(self, card):
if not card.routingAttr:
return False
return set(sim_card.FILES_AID).issubset(set(card.routingAttr.filesCommon))
def getSoftCardDict(self):
for cardDict in self.cardsDict:
if cardDict[MAIN_INTERFACE].mode == sim_reader.MODE_SIM_SOFT:
return cardDict
return None
def getFileHandler(self, file):
#by default execute apdu in card 0
cards = [self.cardsDict[0][MAIN_INTERFACE]]
for cardDict in self.cardsDict:
if cardDict == self.cardsDict[0]:
#cardDict already in cards
continue
card = cardDict[MAIN_INTERFACE]
if file in card.routingAttr.filesCommon:
cards.append(card)
elif file in card.routingAttr. | filesReplaced:
return [card]
return cards
    def getInsHandler(self, ins, apdu):
        """Pick the card(s) that handle instruction *ins* for *apdu*.

        Card 0 handles by default.  A card that replaces the instruction
        takes it exclusively; a card sharing it as 'common' is appended, or
        prepended when its currently selected file is replaced (so its
        response takes precedence).
        """
        #by default execute apdu in card 0
        cards = [self.cardsDict[0][MAIN_INTERFACE]]
        for cardDict in self.cardsDict:
            if cardDict == self.cardsDict[0]:
                #cardDict already in cards
                continue
            card = cardDict[MAIN_INTERFACE]
            # GET_RESPONSE after a replaced INTERNAL_AUTHENTICATE must go to
            # the card that performed the authentication ('AUTH' marker).
            if (ins == 'GET_RESPONSE' and
                card.routingAttr.getFileSelected(apdu[0]) == 'AUTH' and
                'INTERNAL_AUTHENTICATE' in card.routingAttr.insReplaced):
                return [card]
            elif ins in card.routingAttr.insCommon:
                if (ins in ['GET_RESPONSE','SELECT_FILE'] and
                    card.routingAttr.getFileSelected(apdu[0]) in card.routingAttr.filesReplaced):
                    cards.insert(0, card)
                else:
                    cards.append(card)
            elif ins in card.routingAttr.insReplaced:
                if ins == 'INTERNAL_AUTHENTICATE':
                    # Mark this card as the authenticator for later
                    # GET_RESPONSE routing (see branch above).
                    card.routingAttr.setFileSelected('AUTH', apdu[0])
                return [card]
        return cards
def addLeftHandlers(self, cards):
for cardDict in self.cardsDict:
card = cardDict[MAIN_INTERFACE]
if card in cards:
continue
cards.append(card)
return cards
def getHandlers(self, apdu, inject=None):
cardsData = []
if inject == INJECT_NO_FORWARD:
if self.apduInjectedCard:
cardsData.append([self.apduInjectedCard, 0])
else:
cardsData.append([self.getCtrlCard(0), 0])
return cardsData
ins = types.insName(apdu)
if ins == 'SELECT_FILE':
for cardDict in self.cardsDict:
card = cardDict[MAIN_INTERFACE]
#TODO: handle read/write/update command with SFI in P1
card.routingAttr.setFileSelected(self.fileName(apdu), apdu[0])
if ins in sim_card.FILE_INS:
cards = self.getFileHandler(self.cardsDict[0][MAIN_INTERFACE].routingAttr.getFileSelected(apdu[0]))
else:
cards = self.getInsHandler(ins, apdu)
i = 0;
forwardApdu = True
for card in cards:
if i != 0:
forwardApdu = False
cardsData.append([card, forwardApdu])
i += 1
return cardsData
def handleApdu(self, cardData, apdu):
card = cardData[0]
sendData = cardData[1]
if card == None:
raise Exception("card not initialized")
ins = types.insName(apdu)
if card != self.getMainCard(0):
origApdu = apdu
if ( self.aidCommon(card) and
card.routingAttr.aidToSelect and
self.getMainCard(0).routingAttr.aidToSelect == hextools.bytes2hex(apdu) and #origin apdu is AID
int(card.routingAttr.aidToSelect[0:2], 16) == apdu[0]): #check the same class
apdu = hextools.hex2bytes(card.routingAttr.aidToSelect)
card.routingAttr.aidToSelect = None
elif ( self.aidCommon(card) and
card.routingAttr.getFileSelected(apdu[0]) == 'EF_DIR' and
ins == 'READ_RECORD' and
card.routingAttr.recordEfDirLength):
apdu[4] = card.routingAttr.recordEfDirLength
if origApdu != apdu:
self.loggingApdu.info("")
self.loggingApdu.info("*C-APDU%d: %s" %(self.getSimId(card), hextools.bytes2hex(apdu)))
if self.simType == types.TYPE_SIM and (apdu[0] & 0xF0) != 0xA0:
#force 2G on USIM cards
sw = types_g.sw.CLASS_NOT_SUPPORTED
sw1 = sw>>8
sw2 = sw & 0x00FF
responseApdu = [sw1, sw2]
elif ins == 'GET_RESPONSE' and card.routingAttr.getResponse:
responseApdu = card.routingAttr.getResponse
card.routingAttr.getResponse = None
else:
responseApdu = card.apdu(apdu)
if card != self.getMainCard(0):
if (self.aidCommon(card) and
card.routingAttr.getFileSelected(apdu[0]) == 'EF_DIR' and
ins == 'GET_RESPONSE' and
types.swNoError(responseApdu) and
len(responseApdu) > 7):
card.routingAttr.recordEfDirL |
==================================
# pylint: disable=unused-import,g-bad-import-order
"""Contains the core layers: Dense, Dropout.
Also contains their functional aliases.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from six.moves import xrange # pylint: disable=redefined-builtin
import numpy as np
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import standard_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.layers import base
from tensorflow.python.layers import utils
class Dense(base._Layer):  # pylint: disable=protected-access
  """Densely-connected layer class.

  This layer implements the operation `outputs = activation(inputs.w + b)`
  Where `activation` is the activation function passed as the `activation`
  argument (if not `None`), `w` is a weights matrix created by the layer,
  and `b` is a bias vector created by the layer (only if `use_bias` is `True`).

  Note: if the input to the layer has a rank greater than 2, then it is
  flattened prior to the initial matrix multiply by `w`.

  Arguments:
    units: Integer or Long, dimensionality of the output space.
    activation: Activation function (callable). Set it to None to maintain a
      linear activation.
    use_bias: Boolean, whether the layer uses a bias.
    weights_initializer: Initializer function for the weight matrix.
    bias_initializer: Initializer function for the bias.
    weights_regularizer: Regularizer function for the weight matrix.
    bias_regularizer: Regularizer function for the bias.
    activity_regularizer: Regularizer function for the output.
    trainable: Boolean, if `True` also add variables to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
    name: String, the name of the layer. Layers with the same name will
      share weights, but to avoid mistakes we require reuse=True in such cases.
    reuse: Boolean, whether to reuse the weights of a previous layer
      by the same name.

  Properties:
    units: Python integer, dimensionality of the output space.
    activation: Activation function (callable).
    use_bias: Boolean, whether the layer uses a bias.
    weights_initializer: Initializer instance (or name) for the weight matrix.
    bias_initializer: Initializer instance (or name) for the bias.
    weights_regularizer: Regularizer instance for the weight matrix (callable)
    bias_regularizer: Regularizer instance for the bias (callable).
    activity_regularizer: Regularizer instance for the output (callable)
    weights: Weight matrix (TensorFlow variable or tensor).
    bias: Bias vector, if applicable (TensorFlow variable or tensor).
  """

  def __init__(self, units,
               activation=None,
               use_bias=True,
               weights_initializer=None,
               bias_initializer=init_ops.zeros_initializer(),
               weights_regularizer=None,
               bias_regularizer=None,
               activity_regularizer=None,
               trainable=True,
               name=None,
               **kwargs):
    super(Dense, self).__init__(trainable=trainable, name=name, **kwargs)
    self.units = units
    self.activation = activation
    self.use_bias = use_bias
    self.weights_initializer = weights_initializer
    self.bias_initializer = bias_initializer
    self.weights_regularizer = weights_regularizer
    self.bias_regularizer = bias_regularizer
    # NOTE(review): activity_regularizer is stored but not applied anywhere
    # in this class's visible code -- presumably consumed by base._Layer;
    # confirm.
    self.activity_regularizer = activity_regularizer

  def build(self, input_shape):
    """Creates the `weights` (and optional `bias`) variables.

    Raises ValueError when the input rank or last dimension is unknown.
    """
    input_shape = tensor_shape.TensorShape(input_shape)
    if input_shape.ndims is None:
      raise ValueError('Inputs to `Dense` should have known rank.')
    if len(input_shape) < 2:
      raise ValueError('Inputs to `Dense` should have rank >= 2.')
    if input_shape[-1].value is None:
      raise ValueError('The last dimension of the inputs to `Dense` '
                       'should be defined. Found `None`.')
    # Note that we set `trainable=True` because this is a trainable
    # weight of the layer. If the layer is not trainable
    # (self.trainable = False), the variable will not be added to
    # tf.trainable_variables(), and self.trainable_weights will be empty.
    self.w = vs.get_variable('weights',
                             shape=[input_shape[-1].value, self.units],
                             initializer=self.weights_initializer,
                             regularizer=self.weights_regularizer,
                             dtype=self.dtype,
                             trainable=True)
    if self.use_bias:
      self.bias = vs.get_variable('bias',
                                  shape=[self.units,],
                                  initializer=self.bias_initializer,
                                  regularizer=self.bias_regularizer,
                                  dtype=self.dtype,
                                  trainable=True)
    else:
      self.bias = None

  def call(self, inputs):
    """Applies matmul (+ bias, + activation), flattening rank>2 inputs."""
    shape = inputs.get_shape().as_list()
    input_dim = shape[-1]
    output_shape = shape[:-1] + [self.units]
    if len(output_shape) > 2:
      # Reshape the input to 2D.
      # Build the dynamic output shape (input shape with the last dim
      # replaced by `units`) so the original rank can be restored below.
      output_shape_tensors = array_ops.unpack(array_ops.shape(inputs))
      output_shape_tensors[-1] = self.units
      output_shape_tensor = array_ops.stack(output_shape_tensors)
      inputs = array_ops.reshape(inputs, [-1, input_dim])
    outputs = standard_ops.matmul(inputs, self.w)
    if self.use_bias:
      outputs = nn.bias_add(outputs, self.bias)
    if len(output_shape) > 2:
      # Reshape the output back to the original ndim of the input.
      outputs = array_ops.reshape(outputs, output_shape_tensor)
      outputs.set_shape(output_shape)
    if self.activation is not None:
      return self.activation(outputs)  # pylint: disable=not-callable
    return outputs
def dense(
inputs, units,
activation=None,
use_bias=True,
weights_initializer=None,
bias_initializer=init_ops.zeros_initializer(),
weights_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
trainable=True,
name=None,
reuse=False):
"""Functional interface for the densely-connected layer.
This layer implements the operation `outputs = activation(inputs.w + b)`
Where `activation` is the activation function passed as the `activation`
argument (if not `None`), `w` is a weights matrix created by the layer,
and `b` is a bias vector created by the layer (only if `use_bias` is `True`).
Note: if the `inputs` tensor has a rank greater than 2, then it is
flattened prior to the initial matrix multiply by `w`.
Arguments:
inputs: Tensor input.
units: Integer or Long, dimensionality of the output space.
activation: Activation function (callable). Set it to None to maintain a
linear activation.
use_bias: Boolean, whether the layer uses a bias.
weights_initializer: Initializer function for the weight matrix.
bias_initializer: Initializer function for the bias.
weights_regularizer: Regularizer function for the weight matrix.
bias_regularizer: Regularizer function for the bias.
activity_regularizer: Regularizer function for the output.
trainable: Boolean, if `True` also add variables to the graph collection
`GraphKeys.TRAINABLE_VARIABLES` (see tf.Variable).
name: String, the name of the layer.
reuse: Boolean, whether to reuse the weights of a previous layer
by the same name.
Returns:
Output tensor.
"""
layer = Dense(units,
activation=activation,
use_bias=use_bias,
weights_initializer=weights_initializer,
bias_initializer=bias_initializer,
weights_regularizer=weights_regularizer,
bias_regularizer=bias_regularizer,
activity_regularizer=activity_regularizer,
trainable=trainable,
name=name,
|
self.hogCPU = 0.035
self.timesleep = self.TR
self.volumes = int(volumes)
self.sync = sync
self.skip = skip
self.playSound = sound
if self.playSound: # pragma: no cover
self.sound1 = Sound(800, secs=self.TA, volume=0.15, autoLog=False)
self.sound2 = Sound(813, secs=self.TA, volume=0.15, autoLog=False)
self.clock = core.Clock()
self.stopflag = False
threading.Thread.__init__(self, None, 'SyncGenerator', None)
self.running = False
    def run(self):
        """Thread body: emit one emulated sync pulse per TR for `volumes`
        volumes, after silently waiting out `skip` stabilization volumes."""
        self.running = True
        if self.skip:
            for i in range(int(self.skip)):
                if self.playSound:  # pragma: no cover
                    self.sound1.play()
                    self.sound2.play()
                # emulate T1 stabilization without data collection
                core.wait(self.TR, hogCPUperiod=0)
        self.clock.reset()
        for vol in range(1, self.volumes + 1):
            if self.playSound:  # pragma: no cover
                self.sound1.play()
                self.sound2.play()
            if self.stopflag:
                break
            # "emit" a sync pulse by placing a key in the buffer:
            event._onPygletKey(symbol=self.sync, modifiers=0,
                               emulated=True)
            # wait for start of next volume, doing our own hogCPU for
            # tighter sync:
            core.wait(self.timesleep - self.hogCPU, hogCPUperiod=0)
            while self.clock.getTime() < vol * self.TR:
                pass  # hogs the CPU for tighter sync
        self.running = False
        return self
    def stop(self):
        """Ask the pulse loop in run() to exit before the next volume."""
        self.stopflag = True
def launchScan(win, settings, globalClock=None, simResponses=None,
mode=None, esc_key='escape',
instr='select Scan or Test, press enter',
wait_msg="waiting for scanner...",
wait_timeout=300, log=True):
"""Accepts up to four fMRI scan parameters (TR, volumes, sync-key, skip),
and launches an experiment in one of two modes: Scan, or Test.
:Usage:
See Coder Demo -> experiment control -> fMRI_launchScan.py.
In brief: 1) from psychopy.hardware.emulator import launchScan;
2) Define your args; and 3) add 'vol = launchScan(args)'
at the top of your experiment script.
launchScan() waits for the first sync pulse and then returns, allowing
your experiment script to proceed. The key feature is that, in test mode,
it first starts an autonomous thread that emulates sync pulses (i.e.,
emulated by your CPU rather than generated by an MRI machine). The
thread places a character in the key buffer, exactly like a keyboard
event does. launchScan will wait for the first such sync pulse (i.e.,
character in the key buffer). launchScan returns the number of sync pulses
detected so far (i.e., 1), so that a script can account for them
explicitly.
If a globalClock is given (highly recommended), it is reset to 0.0 when
the first sync pulse is detected. If a mode was not specified when calling
launchScan, the operator is prompted to select Scan or Test.
If **scan mode** is selected, the script will wait until the first scan
pulse is detected. Typically this would be coming from the scanner, but
note that it could also be a person manually pressing that key.
If **test mode** is selected, launchScan() starts a separate thread to
emit sync pulses / key presses. Note that this thread is effectively
nothing more than a key-pressing metronome, emitting a key at the start
of every TR, doing so with high temporal precision.
If your MR hardware interface does not deliver a key character as a sync
flag, you can still use launchScan() to test script timing. You have to
code your experiment to trigger on either a sync character (to test
timing) or your usual sync flag (for actual scanning).
:Parameters:
win: a :class:`~psychopy.visual.Window` object (required)
settings : a dict containing up to 5 parameters
(2 required: TR, volumes)
TR :
seconds per whole-brain volume (minimum value = 0.1s)
volumes :
number of whole-brain (3D) volumes to obtain in a given
scanning run.
sync :
(optional) key for sync timing, default = '5'.
skip :
(optional) how many volumes to silently omit initially
(during T1 stabilization, no sync pulse). default = 0.
sound :
(optional) whether to play a sound when simulating scanner
sync pulses
globalClock :
optional but highly recommended :class:`~psychopy.core.Clock` to
be used during the scan; if one is given, it is reset to 0.000
when the first sync pulse is received.
simResponses :
optional list of tuples [(time, key), (time, key), ...]. time
values are seconds after the first scan pulse is received.
esc_key :
key to be used for user-interrupt during launch.
default = 'escape'
mode :
if mode is 'Test' or 'Scan', launchScan() will start in that mode.
instr :
instructions to be displayed to the scan operator during mode
selection.
wait_msg :
message to be displayed to the subject while waiting for the
scan to start (i.e., after operator indicates start but before
the first scan pulse is received).
wait_timeout :
time in seconds that launchScan will wait before assuming
something went wrong and exiting. Defaults to 300sec (5 min).
Raises a RuntimeError if no sync pulse is received in the
allowable time.
"""
if not 'sync' in settings:
settings.update({'sync': '5'})
if not 'skip' in settings:
settings.update({'skip': 0})
try:
wait_timeout = max(0.01, float(wait_timeout))
except ValueError:
msg = "wait_timeout must be number-like, but instead it was {}."
raise ValueError(msg.format(wait_timeout))
settings['sync'] = "{}".format(settings['sync']) # convert to str/unicode
settings['TR'] = float(settings['TR'])
settings['volumes'] = int(settings['volumes'])
settings['skip'] = int(settings['skip'])
msg = "vol: %(volumes)d TR: %(TR).3fs skip: %(skip)d sync: '%(sync)s'"
runInfo = msg % settings
if log: # pragma: no cover
logging.exp('launchScan: ' + runInfo)
instructions = visual.TextStim(
win, text=instr, height=.05, pos=(0, 0), color=.4, autoLog=False)
parameters = visual.TextStim(
win, text=runInfo, height=.05, pos=(0, -0.5), color=.4, autoLog=False)
# if a valid mode was specified, use it; otherwise query via RatingScale:
mode = "{}".format(mode).capitalize()
if mode not in ['Scan', 'Test']:
run_type = visual.RatingScale(win, choices=['Scan', 'Test'],
marker='circle',
markerColor='DarkBlue', size=.8,
stretch=.3, pos=(0.8, -0.9),
markerStart='Test',
lineColor='DarkGray', autoLog=False)
while run_type.noResponse:
instructions.draw()
parameters.draw()
run_type.draw()
win.flip()
if event.getKeys([esc_key]):
break
mode = run_type.getRating()
doSimulation = bool(mode == 'Test')
win.mouseVisible = False
if doSimulation:
wait_msg += ' (simulation)'
msg = visual.TextStim(win, color='DarkGray', text=wait_msg, autoLog=False)
msg.draw()
win.flip()
event.clearEvents() # do before starting the threads
if doSimulation:
syncPulse = SyncGenerator(**settings)
syncPulse.start() # start emitting sync pulses
core.runningThreads |
the License, or
# (at your option) any later version.
#
# Headphones is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Headphones. If not, see <http://www.gnu.org/licenses/>.
import tarfile
import platform
import subprocess
import re
import os
import headphones
from headphones import logger, version, request
def runGit(args):
    # Run `git <args>` via the shell, trying the configured git path first,
    # then `git` on PATH (plus the official installer location on macOS).
    # Returns (output, err); output is None when git was unusable or errored.
    if headphones.CONFIG.GIT_PATH:
        git_locations = ['"' + headphones.CONFIG.GIT_PATH + '"']
    else:
        git_locations = ['git']

    if platform.system().lower() == 'darwin':
        git_locations.append('/usr/local/git/bin/git')

    output = err = None

    for cur_git in git_locations:
        cmd = cur_git + ' ' + args

        try:
            logger.debug('Trying to execute: "' + cmd + '" with shell in ' + headphones.PROG_DIR)
            p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
                                 cwd=headphones.PROG_DIR)
            output, err = p.communicate()
            output = output.strip()
            logger.debug('Git output: ' + output)
        except OSError:
            # This candidate binary could not be spawned at all; try the next.
            logger.debug('Command failed: %s', cmd)
            continue

        # A shell "command not found" message means this candidate is unusable.
        if 'not found' in output or "not recognized as an internal or external command" in output:
            logger.debug('Unable to find git with command ' + cmd)
            output = None
        elif 'fatal:' in output or err:
            logger.error('Git returned bad info. Are you sure this is a git installation?')
            output = None
        elif output:
            break

    return (output, err)
def getVersion():
    """Determine the installed Headphones version and branch.

    Returns a ``(version, branch)`` tuple: a commit hash for git installs,
    the contents of version.txt for source installs, or None when the
    version cannot be determined.  Sets ``headphones.INSTALL_TYPE`` as a
    side effect.
    """
    if version.HEADPHONES_VERSION.startswith('win32build'):
        headphones.INSTALL_TYPE = 'win'
        # Don't have a way to update exe yet, but don't want to set VERSION to None
        return 'Windows Install', 'master'

    elif os.path.isdir(os.path.join(headphones.PROG_DIR, '.git')):
        headphones.INSTALL_TYPE = 'git'
        output, err = runGit('rev-parse HEAD')

        if not output:
            logger.error('Couldn\'t find latest installed version.')
            cur_commit_hash = None
        else:
            # BUG FIX: str(output) used to run even when output was falsy,
            # turning the "hash" into the literal string 'None' and logging
            # a second, misleading error below.
            cur_commit_hash = str(output)
            if not re.match('^[a-z0-9]+$', cur_commit_hash):
                logger.error('Output doesn\'t look like a hash, not using it')
                cur_commit_hash = None

        if headphones.CONFIG.DO_NOT_OVERRIDE_GIT_BRANCH and headphones.CONFIG.GIT_BRANCH:
            branch_name = headphones.CONFIG.GIT_BRANCH
        else:
            branch_name, err = runGit('rev-parse --abbrev-ref HEAD')

        # Fall back to the configured branch, then 'master', when git could
        # not report one (e.g. detached HEAD or git failure).
        if not branch_name and headphones.CONFIG.GIT_BRANCH:
            logger.error(
                'Could not retrieve branch name from git. Falling back to %s' % headphones.CONFIG.GIT_BRANCH)
            branch_name = headphones.CONFIG.GIT_BRANCH
        if not branch_name:
            logger.error('Could not retrieve branch name from git. Defaulting to master')
            branch_name = 'master'

        return cur_commit_hash, branch_name

    else:
        headphones.INSTALL_TYPE = 'source'
        version_file = os.path.join(headphones.PROG_DIR, 'version.txt')

        if not os.path.isfile(version_file):
            return None, 'master'

        with open(version_file, 'r') as f:
            current_version = f.read().strip(' \n\r')

        if current_version:
            return current_version, headphones.CONFIG.GIT_BRANCH
        else:
            return None, 'master'
def checkGithub():
    """Check GitHub for the newest commit on the configured branch.

    Updates ``headphones.LATEST_VERSION`` and ``headphones.COMMITS_BEHIND``
    and returns the latest remote sha (or the current version when GitHub
    cannot be queried).
    """
    headphones.COMMITS_BEHIND = 0

    # Get the latest version available from github
    logger.info('Retrieving latest version information from GitHub')
    url = 'https://api.github.com/repos/%s/headphones/commits/%s' % (
        headphones.CONFIG.GIT_USER, headphones.CONFIG.GIT_BRANCH)
    # Renamed local from `version` to avoid shadowing the imported
    # headphones.version module used elsewhere in this file.
    latest = request.request_json(url, timeout=20, validator=lambda x: type(x) == dict)

    if latest is None:
        logger.warn(
            'Could not get the latest version from GitHub. Are you running a local development version?')
        return headphones.CURRENT_VERSION

    headphones.LATEST_VERSION = latest['sha']
    logger.debug("Latest version is %s", headphones.LATEST_VERSION)

    # See how many commits behind we are
    if not headphones.CURRENT_VERSION:
        logger.info(
            'You are running an unknown version of Headphones. Run the updater to identify your version')
        return headphones.LATEST_VERSION

    if headphones.LATEST_VERSION == headphones.CURRENT_VERSION:
        logger.info('Headphones is up to date')
        return headphones.LATEST_VERSION

    logger.info('Comparing currently installed version with latest GitHub version')
    url = 'https://api.github.com/repos/%s/headphones/compare/%s...%s' % (
        headphones.CONFIG.GIT_USER, headphones.LATEST_VERSION, headphones.CURRENT_VERSION)
    commits = request.request_json(url, timeout=20, whitelist_status_code=404,
                                   validator=lambda x: type(x) == dict)

    if commits is None:
        logger.warn('Could not get commits behind from GitHub.')
        return headphones.LATEST_VERSION

    try:
        headphones.COMMITS_BEHIND = int(commits['behind_by'])
        logger.debug("In total, %d commits behind", headphones.COMMITS_BEHIND)
    except KeyError:
        # The compare endpoint omits 'behind_by' for unknown commits.
        logger.info('Cannot compare versions. Are you running a local development version?')
        headphones.COMMITS_BEHIND = 0

    if headphones.COMMITS_BEHIND > 0:
        logger.info(
            'New version is available. You are %s commits behind' % headphones.COMMITS_BEHIND)
    elif headphones.COMMITS_BEHIND == 0:
        logger.info('Headphones is up to date')

    return headphones.LATEST_VERSION
def update():
    # Apply the newest version: `git pull` for git installs, or download and
    # unpack a GitHub tarball for source installs.  Windows .exe installs
    # cannot self-update yet.
    if headphones.INSTALL_TYPE == 'win':
        logger.info('Windows .exe updating not supported yet.')
    elif headphones.INSTALL_TYPE == 'git':
        output, err = runGit('pull origin ' + headphones.CONFIG.GIT_BRANCH)

        if not output:
            logger.error('Couldn\'t download latest version')
        # NOTE(review): output may be None here, in which case split() would
        # raise — confirm runGit's failure modes before relying on this path.
        for line in output.split('\n'):
            if 'Already up-to-date.' in line:
                logger.info('No update available, not updating')
                logger.info('Output: ' + str(output))
            elif line.endswith('Aborting.'):
                logger.error('Unable to update from git: ' + line)
                logger.info('Output: ' + str(output))
    else:
        tar_download_url = 'https://github.com/%s/headphones/tarball/%s' % (
            headphones.CONFIG.GIT_USER, headphones.CONFIG.GIT_BRANCH)
        update_dir = os.path.join(headphones.PROG_DIR, 'update')
        version_path = os.path.join(headphones.PROG_DIR, 'version.txt')

        logger.info('Downloading update from: ' + tar_download_url)
        data = request.request_content(tar_download_url)

        if not data:
            logger.error("Unable to retrieve new version from '%s', can't update", tar_download_url)
            return

        download_name = headphones.CONFIG.GIT_BRANCH + '-github'
        tar_download_path = os.path.join(headphones.PROG_DIR, download_name)

        # Save tar to disk
        with open(tar_download_path, 'wb') as f:
            f.write(data)

        # Extract the tar to update folder
        logger.info('Extracting file: ' + tar_download_path)
        tar = tarfile.open(tar_download_path)
        tar.extractall(update_dir)
        tar.close()

        # Delete the tar.gz
        logger.info('Deleting file: ' + tar_download_path)
        os.remove(tar_download_path)

        # Find update dir name
        update_dir_contents = [x for x in os.listdir(update_dir) if
                               os.path.isdir(os.path.join(update_dir, x))]
        if len(update_dir_contents) != 1:
            logger.error("Invalid update data, update failed: " + str(update_dir_contents))
            return
|
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""nsx_gw_devices
Revision ID: 19180cf98af6
Revises: 117643811bca
Create Date: 2014-02-26 02:46:26.151741
"""
# revision identifiers, used by Alembic.
revision = '19180cf98af6'
down_revision = '117643811bca'
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade():
    """Split gateway-device rows into reference and device tables.

    Moves per-gateway interface bindings into the new
    ``networkgatewaydevicereferences`` table, then recreates
    ``networkgatewaydevices`` as a standalone device registry seeded from
    the existing references.
    """
    if not migration.schema_has_table('networkgatewaydevices'):
        # Assume that, in the database we are migrating from, the
        # configured plugin did not create any nsx tables.
        return

    op.create_table(
        'networkgatewaydevicereferences',
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('network_gateway_id', sa.String(length=36), nullable=True),
        sa.Column('interface_name', sa.String(length=64), nullable=True),
        sa.ForeignKeyConstraint(['network_gateway_id'], ['networkgateways.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('id', 'network_gateway_id', 'interface_name'))

    # Copy data from networkgatewaydevices into networkgatewaydevicereferences
    op.execute("INSERT INTO networkgatewaydevicereferences SELECT "
               "id, network_gateway_id, interface_name FROM "
               "networkgatewaydevices")

    # drop networkgatewaydevices so it can be recreated with the new schema
    op.drop_table('networkgatewaydevices')

    op.create_table(
        'networkgatewaydevices',
        sa.Column('tenant_id', sa.String(length=255), nullable=True),
        sa.Column('id', sa.String(length=36), nullable=False),
        sa.Column('nsx_id', sa.String(length=36), nullable=True),
        sa.Column('name', sa.String(length=255), nullable=True),
        sa.Column('connector_type', sa.String(length=10), nullable=True),
        sa.Column('connector_ip', sa.String(length=64), nullable=True),
        sa.Column('status', sa.String(length=16), nullable=True),
        sa.PrimaryKeyConstraint('id'))

    # Create a networkgatewaydevice for each existing reference.
    # For existing references nsx_id == neutron_id.
    # Do not fill connector info as it is unknown at migration time.
    op.execute("INSERT INTO networkgatewaydevices (id, nsx_id, tenant_id) "
               "SELECT gw_dev_ref.id, gw_dev_ref.id as nsx_id, tenant_id "
               "FROM networkgatewaydevicereferences AS gw_dev_ref "
               "INNER JOIN networkgateways AS net_gw ON "
               "gw_dev_ref.network_gateway_id=net_gw.id")
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Support for tf.contrib.data when eager execution is enabled."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from tensorflow.python.data.util import nest
from tensorflow.python.eager import context
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import resource_variable_ops
_uid_counter = 0
_uid_lock = threading.Lock()
def _iterator_shared_name():
with _uid_lock:
global _uid_counter
uid = _uid_counter
_uid_counter += 1
return "eager_iterator_{}".format(uid)
class Iterator(object):
  """An iterator producing tf.Tensor objects from a tf.contrib.data.Dataset."""

  def __init__(self, dataset):
    """Creates a new iterator over the given dataset.

    For example:
    ```python
    dataset = tf.contrib.data.Dataset.range(4)
    for x in Iterator(dataset):
      print(x)
    ```

    Args:
      dataset: A `tf.contrib.data.Dataset` object.

    Raises:
      RuntimeError: When invoked without eager execution enabled.
    """
    if not context.in_eager_mode():
      raise RuntimeError(
          "{} objects only make sense when eager execution is enabled".format(
              type(self)))
    # The iterator resource and its ops are pinned to the CPU.
    with ops.device("/device:CPU:0"):
      ds_variant = dataset._as_variant_tensor()  # pylint: disable=protected-access
      self._output_types = dataset.output_types
      # Flattened type/shape lists are what the dataset ops consume.
      self._flat_output_types = nest.flatten(dataset.output_types)
      self._flat_output_shapes = nest.flatten(dataset.output_shapes)
      self._resource = gen_dataset_ops.iterator(
          container="",
          shared_name=_iterator_shared_name(),
          output_types=self._flat_output_types,
          output_shapes=self._flat_output_shapes)
      gen_dataset_ops.make_iterator(ds_variant, self._resource)

  def __del__(self):
    # Destroy the underlying resource; setting it to None guards against a
    # second destruction if __del__ runs more than once.
    if self._resource is not None:
      with ops.device("/device:CPU:0"):
        resource_variable_ops.destroy_resource_op(self._resource)
      self._resource = None

  def __iter__(self):
    return self

  def __next__(self):  # For Python 3 compatibility
    return self.next()

  def next(self):
    """Return the next tf.Tensor from the dataset.

    Raises:
      StopIteration: when the underlying dataset is exhausted.
    """
    try:
      # TODO(ashankar): Consider removing this ops.device() contextmanager
      # and instead mimic ops placement in graphs: Operations on resource
      # handles execute on the same device as where the resource is placed.
      with ops.device("/device:CPU:0"):
        ret = gen_dataset_ops.iterator_get_next(
            self._resource,
            output_types=self._flat_output_types,
            output_shapes=self._flat_output_shapes)
      # Re-nest the flat result to match the dataset's declared structure.
      return nest.pack_sequence_as(self._output_types, ret)
    except errors.OutOfRangeError:
      raise StopIteration
|
####################################
# Sample Receiver Script
# [Usage]
# python receiver.py
# python receiver.py > data.csv
# [Data Format]
# id,time,x,y,z
# [Example]
# 1,118 | .533,-0.398,-0.199,-0.978
####################################
import sys
import os
import math
import time
import SocketServer
PORTNO = 10552
class handler(SocketServer.DatagramRequestHandler):
    # Handles one incoming UDP datagram: read a single line from the packet
    # and echo it to stdout (redirect stdout to capture the data as CSV).
    def handle(self):
        newmsg = self.rfile.readline().rstrip()
        print newmsg
# Listen on all interfaces and dispatch each datagram to handler.handle().
s = SocketServer.UDPServer(('',PORTNO), handler)
print "Awaiting UDP messages on port %d" % PORTNO
s.serve_forever()
'''
Servants is of primary interest to Python component developers.
The module names should sufficiently describe their intended uses.
'''
__revision__ = "$Id: __init__.py,v 1.4 2005/02/25 23:42:32 dfugate Exp $"
|
#!/usr/bin/env python
# Example input strings and the parameter list consumed by the __main__ driver.
strings=['hey','guys','i','am','a','string']
parameter_list=[[strings]]
def features_string_char(strings):
    """Build RAWBYTE StringCharFeatures from `strings`, overwrite the first
    vector with 'test', and return (string_list, feature_object)."""
    from shogun import StringCharFeatures, RAWBYTE
    from numpy import array

    # create string features from the raw python strings
    feats = StringCharFeatures(strings, RAWBYTE)

    # stats that can be printed while debugging:
    #print("max string length", feats.get_max_vector_length())
    #print("number of strings", feats.get_num_vectors())
    #print("length of first string", feats.get_vector_length(0))
    #print("string[5]", ''.join(feats.get_feature_vector(5)))
    #print("strings", feats.get_features())

    # replace string 0 with a fixed test vector
    feats.set_feature_vector(array(['t', 'e', 's', 't']), 0)
    #print("strings", feats.get_features())

    return feats.get_string_list(), feats
if __name__=='__main__':
    # Run the example with the default parameters defined above.
    print('StringCharFeatures')
    features_string_char(*parameter_list[0])
|
# Copyright (C) 2006, Thomas Hamelryck (thamelry@binf.ku.dk)
# This code is part of the Biopython distribution and governed by its
# license. Please see the LICENSE file that should have been included
# as part of this package.
"""Wrappers for PSEA, a program for secondary structure assignment.
See this citation for P-SEA, PMID: 9183534
Labesse G, Colloc'h N, Pothier J, Mornon J-P: P-SEA: a new efficient
assignment of secondary structure from C_alpha.
Comput Appl Biosci 1997 , 13:291-295
ftp://ftp.lmcp.jussieu.fr/pub/sincris/software/protein/p-sea/
"""
import os
from Bio.PDB.Polypeptide import is_aa
def run_psea(fname):
    """Run PSEA and return output filename.

    Note that this assumes the P-SEA binary is called "psea" and that it is
    on the path.

    Note that P-SEA will write an output file in the current directory using
    the input filename with extension ".sea".

    Note that P-SEA will write output to the terminal while run.
    """
    # NOTE(review): fname is interpolated into a shell command; do not pass
    # untrusted filenames here (consider subprocess with a list argument).
    os.system("psea " + fname)
    # Use os.path.basename instead of splitting on "/" so this also works
    # with Windows-style paths; keep split(".")[0] (everything before the
    # FIRST dot) to match P-SEA's output naming.
    base = os.path.basename(fname).split(".")[0]
    return base + ".sea"
def psea(pname):
    """Run P-SEA on a PDB file and parse its output.

    Returns the secondary-structure string (one character per residue)
    from the ">p-sea" section of the generated .sea file.
    """
    fname = run_psea(pname)
    start = 0
    ss = ""
    # Use a context manager so the handle is closed even if parsing fails
    # (the original left the file open on exceptions).
    with open(fname, 'r') as fp:
        for l in fp:
            if l[0:6] == ">p-sea":
                # Structure lines follow this header.
                start = 1
                continue
            if not start:
                continue
            if l[0] == "\n":
                # Blank line terminates the structure block.
                break
            ss = ss + l[0:-1]
    return ss
def psea2HEC(pseq):
    """Translate PSEA secondary structure string into a list of HEC codes.

    'a' -> 'H' (helix), 'b' -> 'E' (strand), anything else -> 'C' (coil).

    BUG FIX: the original left ``n`` unset (NameError) or stale from the
    previous residue for codes other than a/b/c; defaulting unknown codes
    to coil fixes that without changing the mapping for valid input.
    """
    seq = []
    for ss in pseq:
        if ss == "a":
            n = "H"
        elif ss == "b":
            n = "E"
        else:
            n = "C"
        seq.append(n)
    return seq
def annotate(m, ss_seq):
    """Apply secondary structure information to residues in model.

    Stores one HEC code per amino-acid residue under the "SS_PSEA" key of
    each residue's xtra dict; raises ValueError on a length mismatch.
    """
    first_chain = m.get_list()[0]
    # Keep amino acids only (drops waters and other heteroatoms).
    residues = [res for res in first_chain.get_list() if is_aa(res)]
    L = len(residues)
    if not (L == len(ss_seq)):
        raise ValueError("Length mismatch %i %i" % (L, len(ss_seq)))
    for res, code in zip(residues, ss_seq):
        res.xtra["SS_PSEA"] = code
class PSEA:
    """Run P-SEA on a PDB file and attach the result to a model."""

    def __init__(self, model, filename):
        # Parse P-SEA output, translate to HEC, and tag the model residues.
        hec_seq = psea2HEC(psea(filename))
        annotate(model, hec_seq)
        self.ss_seq = hec_seq

    def get_seq(self):
        """Return secondary structure string."""
        return self.ss_seq
if __name__=="__main__":
    import sys
    from Bio.PDB import PDBParser

    # Parse PDB file given as the first CLI argument
    p=PDBParser()
    s=p.get_structure('X', sys.argv[1])

    # Annotate structure with PSEA secondary structure info
    PSEA(s[0], sys.argv[1])
|
# This file is part of Indico.
# Copyright (C) 2002 - 2017 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from ipaddress import ip_network
from operator import itemgetter
from indico.util.i18n import _
from indico.web.forms.fields import MultiStringField
class MultiIPNetworkField(MultiStringField):
    """A field to enter multiple IPv4 or IPv6 networks.

    The field data is a set of ``IPNetwork``s not bound to a DB session.
    The ``unique`` and ``sortable`` parameters of the parent class cannot be used with this class.
    """

    def __init__(self, *args, **kwargs):
        super(MultiIPNetworkField, self).__init__(*args, field=('subnet', _("subnet")), **kwargs)
        # Tracks whether self.data currently holds converted IPNetwork
        # objects (True) or the raw widget dicts (False).
        self._data_converted = False
        self.data = None

    def _value(self):
        # Render converted data back into the dict format the widget
        # expects, sorted for stable display; raw data passes through.
        if self.data is None:
            return []
        elif self._data_converted:
            data = [{self.field_name: unicode(network)} for network in self.data or []]
            return sorted(data, key=itemgetter(self.field_name))
        else:
            return self.data

    def process_data(self, value):
        # Data coming from Python is already a set of IPNetwork objects.
        if value is not None:
            self._data_converted = True
        self.data = value

    def _fix_network(self, network):
        # ip_network() rejects non-ascii input, so strip it up front.
        network = network.encode('ascii', 'ignore')
        if network.startswith('::ffff:'):
            # convert ipv6-style ipv4 to regular ipv4
            # the ipaddress library doesn't deal with such IPs properly!
            network = network[7:]
        return unicode(network)

    def process_formdata(self, valuelist):
        # Let the parent populate self.data with raw dicts, then convert
        # each entry into an IPNetwork.
        self._data_converted = False
        super(MultiIPNetworkField, self).process_formdata(valuelist)
        self.data = {ip_network(self._fix_network(entry[self.field_name])) for entry in self.data}
        self._data_converted = True

    def pre_validate(self, form):
        pass  # nothing to do
|
import argparse
import hashlib
import json
import csv
import os
MAESTRO_INDEX_PATH = '../mirdata/indexes/maestro_index.json'
def md5(file_path):
    """Compute the md5 hex digest of a file, reading in 4 KiB chunks.

    Parameters
    ----------
    file_path: str
        File path.

    Returns
    -------
    md5_hash: str
        md5 hash of data in file_path
    """
    digest = hashlib.md5()
    with open(file_path, 'rb') as stream:
        # Chunked reads keep memory bounded for large audio/midi files.
        for block in iter(lambda: stream.read(4096), b''):
            digest.update(block)
    return digest.hexdigest()
def make_maestro_index(data_path):
    """Build the MAESTRO track index (midi/audio paths with md5 checksums)
    from maestro-v2.0.0.json and write it to MAESTRO_INDEX_PATH."""
    metadata_path = os.path.join(data_path, 'maestro-v2.0.0.json')
    print(metadata_path)

    maestro_index = {}
    with open(metadata_path, 'r') as fhandle:
        metadata = json.load(fhandle)

    for i, row in enumerate(metadata):
        print(i)
        # Track id is the midi filename with its extension stripped.
        trackid = row['midi_filename'].split('.')[0]
        midi_rel = row['midi_filename']
        audio_rel = row['audio_filename']
        maestro_index[trackid] = {
            'midi': [midi_rel, md5(os.path.join(data_path, midi_rel))],
            'audio': [audio_rel, md5(os.path.join(data_path, audio_rel))],
        }

    with open(MAESTRO_INDEX_PATH, 'w') as fhandle:
        json.dump(maestro_index, fhandle, indent=2)
def main(args):
    # Entry point: args.maestro_data_path is the MAESTRO root folder.
    print("creating index...")
    make_maestro_index(args.maestro_data_path)
    print("done!")
if __name__ == '__main__':
    # Parse the data-folder path from the command line and build the index.
    PARSER = argparse.ArgumentParser(description='Make MAESTRO index file.')
    PARSER.add_argument(
        'maestro_data_path', type=str, help='Path to MAESTRO data folder.'
    )
    main(PARSER.parse_args())
|
import simplejson as json
import xmltodict
import requests
import os
import re
import sys
import base64
from urlparse import urlparse
from os.path import splitext, basename
from BeautifulSoup import BeautifulSoup
nb_token = os.environ.get('NB_TOKEN')
site_slug = os.environ.get('SITE_SLUG')
api_url = "https://" + site_slug + ".nationbuilder.com/api/v1/sites/" + \
site_slug
def youtube_links(input_text):
    """Find the first src= URL in a wordpress blogpost.

    Returns the URL string (with ``http:`` prepended to protocol-relative
    ``//...`` URLs), or None when no src= attribute is present.

    BUG FIX: previously an empty *list* was returned when nothing matched,
    which broke callers that compare against None and then concatenate the
    result to a string.
    """
    # This is some horrible regex, see
    # http://stackoverflow.com/questions/839994/extracting-a-url-in-python
    urls = re.findall(r'(src=\S+)', input_text)
    if not urls:
        return None
    url = urls[0].replace('src=', '').replace('"', '')
    # The URL format for youtube videos changed halfway through the life
    # of the blog; newer exports use protocol-relative URLs that need an
    # explicit scheme.
    if url.startswith("//"):
        url = "http:" + url
    return url
def read_xml(input_xml):
    ''' Reads an xml file and turns it into a dictionary '''
    # Context manager closes the handle even on parse errors (the original
    # leaked the open file).
    with open(input_xml, 'r') as f:
        doc_xml = f.read()
    return xmltodict.parse(doc_xml)
def remove_img_tags(input):
    ''' Removes img tags from text '''
    soup = BeautifulSoup(input)
    # Detach every <img> element from the parse tree, then re-serialize.
    for tag in soup('img'):
        tag.extract()
    return str(soup)
def image_links | (input):
''' Finds all the image links in a string '''
list_of_images = []
soup = BeautifulSoup(input)
for i in soup.findAll('img'):
list_of_images.append(i)
return list_of_images
def convert_wp2nb(input_xml):
    '''
    Extracts the relevant items from wordpress posts and converts it into a
    nationbuilder friendly format. If there are any youtube links it appends
    them to the end of the post
    '''
    content = input_xml['content:encoded']
    # BUG FIX: image_urls must exist even when the post body is empty,
    # otherwise building output_dict below raised NameError.
    image_urls = []
    if content is not None:
        content = content.replace('\n', '<br>')
        # extract image URLs to be uploaded
        image_urls = image_links(content)
        # remove img tags as they will be brought in using liquid tags
        content = remove_img_tags(content)
        if content.find('youtube') > 0:
            youtube_url = youtube_links(content)
            if youtube_url is not None:
                content = content + youtube_url
    output_dict = {
        'post':
            {'blog_post': {
                'name': input_xml['title'],
                'slug': input_xml['wp:post_id'],
                'status': 'published',
                'content_before_flip': content,
                'published_at': input_xml['pubDate'],
                'author_id': '2'}},
        'images': image_urls}
    return output_dict
def upload_blog_post(input_json):
    ''' Uploads blog posts to the nationbuilder URL '''
    url = api_url + '/pages/blogs/1/posts'
    payload = input_json
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    parameters = {'access_token': nb_token}
    # POST the pre-serialized JSON payload; the status code is printed so
    # the one-off migration run can be monitored manually.
    r = requests.post(url, data=payload, headers=headers, params=parameters)
    response = r.status_code
    print response
def delete_post(id):
    ''' Delete a nationbuilder post '''
    url = api_url + '/pages/blogs/1/posts/%s' % id
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    parameters = {'access_token': nb_token}
    # Status code printed for manual monitoring of the migration run.
    r = requests.delete(url, headers=headers, params=parameters)
    response = r.status_code
    print response
def get_posts():
    ''' Get blog post IDs of the blog '''
    url = api_url + '/pages/blogs/1/posts/'
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    # 100 per page is the maximum the API allows in one request.
    parameters = {'access_token': nb_token, 'per_page': '100'}
    reply = requests.get(url, headers=headers, params=parameters)
    return json.loads(reply.content)
def upload_image(page_slug, image_url):
    ''' Upload an image attachment to a blog post '''
    url = api_url + '/pages/%s/attachments' % page_slug
    # Fetch and base64-encode the image, then POST it as an attachment.
    payload = json.dumps(prepare_image(image_url))
    headers = {'Content-Type': 'application/json',
               'Accept': 'application/json'}
    parameters = {'access_token': nb_token}
    return requests.post(url, headers=headers, params=parameters,
                         data=payload)
def prepare_image(url):
    '''
    Downloads an image, encodes it to base64 for the NB api and sets required
    parameters then returns a dictionary
    '''
    # Download the image, then encode it as base64 per the NB api requirements
    response = requests.get(url)
    encoded = base64.b64encode(response.content)
    # Split the filename out of the URL. See
    # http://stackoverflow.com/questions/10552188/python-split-url-to-find-image-name-and-extension
    parsed = urlparse(response.url)
    name, ext = splitext(basename(parsed.path))
    image_filename = name[1:] + ext
    return {'attachment': {'filename': image_filename, 'content_type': 'image/jpeg', 'updated_at': '2013-06-06T10:15:02-07:00', 'content': encoded}}
def delete_all_posts():
    ''' Removes all posts from the blog '''
    # Collect the ids first, then delete one by one.
    post_ids = [entry['id'] for entry in get_posts()['results']]
    for post_id in post_ids:
        delete_post(post_id)
if __name__ == "__main__":
    ''' Convert an xml file then upload it to nationbuilder '''
    input_file = sys.argv[1]
    doc = read_xml(input_file)
    # Iterate through the xml entries, if there is any content,
    # then upload to the blog
    for i in doc['rss']['channel']['item']:
        if i['content:encoded']:
            output_dict = convert_wp2nb(i)
            upload_blog_post(json.dumps(output_dict['post']))
            # If the post contains any images, then go through and
            # upload them to the relevant page
            # NOTE(review): the inner loop reuses the name `i`, shadowing
            # the outer item — works, but worth renaming.
            if output_dict['images']:
                for i in output_dict['images']:
                    upload_image(output_dict['post']['blog_post']['slug'], i)
.filename and not self.needs_update():
return
if not os.path.isfile(filename):
return
stats = os.stat(filename)
data = ScriptFile(filename)
self.filename = filename
self.filesize = int(stats.st_size)
self.last_edited = int(stats.st_mtime)
self.data = data
####################################################################
### @fn update()
####################################################################
def update(self):
    # Re-read the backing file from disk, refreshing size/mtime/contents.
    self.load_file(self.filename)
####################################################################
### @fn needs_update()
####################################################################
def needs_update(self):
    # True when the file on disk differs (by size or mtime) from the cached
    # copy, i.e. this entry is stale and should be reloaded.
    if not isinstance(self.data, ScriptFile):
        # Cache entry was never properly populated; force a reload.
        _LOGGER.warning("Probably shouldn't be doing this.")
        return True

    stats = os.stat(self.filename)
    filesize = int(stats.st_size)
    last_edited = int(stats.st_mtime)

    return (filesize != int(self.filesize) or last_edited != int(self.last_edited))
###################################################### | ##########################
### @class ScriptAnalytics
####################################################### | #########################
class ScriptAnalytics():
    """In-memory cache of script files with search and bulk-data access.

    Entries are ScriptData objects keyed by filename (relative to the
    umdimage directory), persisted between runs via pickle in DATA_FILE.
    """

    def __init__(self):
        self.script_data = {}
        self.load()

    def load(self):
        """Load the pickled cache, falling back to an empty one, then
        refresh any stale entries."""
        try:
            with open(DATA_FILE, "rb") as f:
                self.script_data = pickle.load(f)
        except:
            # Missing or corrupt cache file: start from scratch.
            self.script_data = {}
        self.update()

    def save(self):
        """Persist the cache to DATA_FILE."""
        with open(DATA_FILE, "wb") as f:
            pickle.dump(self.script_data, f, pickle.HIGHEST_PROTOCOL)

    def update(self, dir_filter = DEFAULT_FILTER):
        """Updates files whose directory match the filter."""
        txt_files = ScriptAnalytics.list_txt_files(dir_filter)
        for txt_file in txt_files:
            self.update_file(txt_file)

    def update_file(self, filename):
        """Refresh one cache entry, (re)creating it when absent or broken."""
        try:
            self.script_data[filename].update()
        except:
            self.script_data[filename] = ScriptData(os.path.join(common.editor_config.umdimage_dir, filename))

    def search_gen(self, text_filter, dir_filter = DEFAULT_FILTER, search_flags = DEFAULT_SEARCH_FLAGS):
        """Generator form of search().

        Yields (current file number, total files, current filename,
        matches found since the last yield) roughly every MIN_INTERVAL
        seconds so a UI can display progress.
        """
        matches = []

        original = search_flags & SEARCH_ORIGINAL
        translated = search_flags & SEARCH_TRANSLATED
        comments = search_flags & SEARCH_COMMENTS
        notags = search_flags & SEARCH_NOTAGS

        last_update = time.time()

        for i, path in enumerate(self.script_data):
            if time.time() - last_update > MIN_INTERVAL:
                yield i, len(self.script_data), path, matches
                matches = []
                last_update = time.time()

            if not dir_filter.search(path):
                continue

            self.update_file(path)
            data = self.script_data[path]

            # Build the haystack from whichever fields the flags request.
            to_search = []
            if original:
                to_search.append(data.data.original_notags if notags else data.data.original)
            if translated:
                to_search.append(data.data.translated_notags if notags else data.data.translated)
            if comments:
                to_search.append(data.data.comments)
            to_search = "\n".join(to_search)

            if text_filter.search(to_search):
                matches.append(path)

        yield len(self.script_data), len(self.script_data), "", matches

    def search(self, text_filter, dir_filter = DEFAULT_FILTER, search_flags = DEFAULT_SEARCH_FLAGS):
        """Returns a list of files whose contents match the text filter
        and whose directory matches the directory filter."""
        matches = []
        # BUG FIX: search_gen is a method; the original called it as a bare
        # function, which raised NameError and made search() unusable.
        for index, total, path, cur_matches in self.search_gen(text_filter, dir_filter, search_flags):
            matches.extend(cur_matches)
        return matches

    def get_data(self, dir_filter = DEFAULT_FILTER):
        """Yields (file number, total files, filename, data) for each file
        matching the filter, in sorted order; non-matching files yield a
        None data field at most once per MIN_INTERVAL (progress ticks)."""
        last_update = time.time()

        for i, path in enumerate(sorted(self.script_data.keys())):
            if not dir_filter.search(path):
                if time.time() - last_update > MIN_INTERVAL:
                    yield i, len(self.script_data), path, None
                    last_update = time.time()
                continue

            self.update_file(path)
            data = self.script_data[path]
            yield i, len(self.script_data), path, data.data
            last_update = time.time()

    @staticmethod
    def list_txt_files(dir_filter = DEFAULT_FILTER):
        """Returns .txt files (relative to umdimage) in matching dirs."""
        files = []
        for dir in ScriptAnalytics.list_dirs(dir_filter):
            temp_files = list_files.list_all_files(os.path.join(common.editor_config.umdimage_dir, dir))
            files.extend(temp_files)

        # For our dupe database, we need "umdimage" instead of wherever the files
        # are really stored, so we strip that part off first.
        dir_start = len(common.editor_config.umdimage_dir) + 1

        text_files = []
        for file in files:
            if os.path.splitext(file)[1] == ".txt":
                text_files.append(file[dir_start:])

        return text_files

    @staticmethod
    def list_dirs(filter = DEFAULT_FILTER):
        """Returns top-level umdimage directories matching the filter."""
        dirs = []
        base_dir = common.editor_config.umdimage_dir
        for item in os.listdir(base_dir):
            full_path = os.path.join(base_dir, item)
            if os.path.isdir(full_path):
                if filter.search(item):
                    dirs.append(item)
        return dirs
# Shared module-level instance used by the command-line code below.
SA = ScriptAnalytics()
if __name__ == "__main__":
start_time = None
def lazy_timer():
global start_time
if start_time == None:
start_time = |
"""
Django settings for {{ project_name }} project.
"""
import os
import re
MAIN_APPLICATION_PATH = os.path.dirname(
os.path.dirname(os.path.abspath(__file__))
)
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(MAIN_APPLICATION_PATH)
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ.get(
'DJANGO_SECRET', '{{ secret_key }}'
)
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ.get('DEBUG', 'False').lower() in (
    "on", "yes", "true", "t", "1"
)
if DEBUG:
    ALLOWED_HOSTS = ['*']
else:
    # Split the comma-separated env var and strip stray whitespace so that
    # "a.com, b.com" does not yield the invalid host " b.com".
    ALLOWED_HOSTS = [
        host.strip()
        for host in os.environ.get('ALLOWED_HOSTS', '*').split(',')
    ]
SITE_ID = 1
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.c | ontrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites',
]
LOCAL_APPS = [
'core',
]
INSTALLED_APPS.extend(LOCAL_APPS)
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
'loaders': [
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'ru-ru'
TIME_ZONE = 'Asia/Krasnoyarsk'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATICFILES_FINDERS = [
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
]
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static_files')
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
LOCALE_PATHS = [
os.path.join(BASE_DIR, 'translations'),
]
|
def powers_of_two(limit):
    """Yield successive powers of two strictly below *limit*, starting at 1."""
    current = 1
    while current < limit:
        yield current
        current *= 2
# Use the generator
for i in powers_of_two(70):
    print(i)
# Explore the mechanism
g = powers_of_two(100)
# powers_of_two itself is an ordinary function; calling it returns a
# generator object without running the body yet.
assert str(type(powers_of_two)) == "<class 'function'>"
assert str(type(g)) == "<class 'generator'>"
# g.__next__() and the next() builtin are equivalent ways to advance it.
assert g.__next__() == 1
assert g.__next__() == 2
assert next(g) == 4
assert next(g) == 8
|
(%s, 'D')" % field_name
else:
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions050.htm
sql = "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)
return sql, []
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
if settings.USE_TZ:
field_name = self._convert_field_to_tz(field_name, tzname)
# http://docs.oracle.com/cd/B19306_01/server.102/b14200/functions230.htm#i1002084
if lookup_type in ('year', 'month'):
sql = "TRUNC(%s, '%s')" % (field_name, lookup_type.upper())
elif lookup_type == 'day':
sql = "TRUNC(%s)" % field_name
elif lookup_type == 'hour':
sql = "TRUNC(%s, 'HH24')" % field_name
elif lookup_type == 'minute':
sql = "TRUNC(%s, 'MI')" % field_name
else:
sql = field_name # Cast to DATE removes sub-second precision.
return sql, []
    def convert_values(self, value, field):
        """
        Coerce a raw cx_Oracle value into the Python type Django expects for
        ``field``: text for LOBs, '' for NULL char fields, bool/float/Decimal
        for numeric fields, and date/time objects for temporal fields.
        """
        if isinstance(value, Database.LOB):
            value = value.read()
            if field and field.get_internal_type() == 'TextField':
                value = force_text(value)
        # Oracle stores empty strings as null. We need to undo this in
        # order to adhere to the Django convention of using the empty
        # string instead of null, but only if the field accepts the
        # empty string.
        if value is None and field and field.empty_strings_allowed:
            value = ''
        # Convert 1 or 0 to True or False
        elif value in (1, 0) and field and field.get_internal_type() in ('BooleanField', 'NullBooleanField'):
            value = bool(value)
        # Force floats to the correct type
        elif value is not None and field and field.get_internal_type() == 'FloatField':
            value = float(value)
        # Convert floats to decimals
        elif value is not None and field and field.get_internal_type() == 'DecimalField':
            value = util.typecast_decimal(field.format_number(value))
        # cx_Oracle always returns datetime.datetime objects for
        # DATE and TIMESTAMP columns, but Django wants to see a
        # python datetime.date, .time, or .datetime.  We use the type
        # of the Field to determine which to cast to, but it's not
        # always available.
        # As a workaround, we cast to date if all the time-related
        # values are 0, or to time if the date is 1/1/1900.
        # This could be cleaned a bit by adding a method to the Field
        # classes to normalize values from the database (the to_python
        # method is used for validation and isn't what we want here).
        elif isinstance(value, Database.Timestamp):
            if field and field.get_internal_type() == 'DateTimeField':
                pass
            elif field and field.get_internal_type() == 'DateField':
                value = value.date()
            elif field and field.get_internal_type() == 'TimeField' or (value.year == 1900 and value.month == value.day == 1):
                value = value.time()
            elif value.hour == value.minute == value.second == value.microsecond == 0:
                value = value.date()
        return value
    def deferrable_sql(self):
        """SQL appended to a constraint definition to defer checks to commit."""
        return " DEFERRABLE INITIALLY DEFERRED"
def drop_sequence_sql(self, table):
return "DROP SEQUENCE %s;" % self.quote_name(self._get_sequence_name(table))
def fetch_returned_insert_id(self, cursor):
return int(cursor._insert_id_var.getvalue())
def field_cast_sql(self, db_type, internal_type):
if db_type and db_type.endswith('LOB'):
return "DBMS_LOB.SUBSTR(%s)"
else:
return "%s"
    def last_executed_query(self, cursor, sql, params):
        """
        Return the last query as executed, decoding cx_Oracle's ``statement``
        attribute to text under Python 2 before delegating to the base class.
        """
        # http://cx-oracle.sourceforge.net/html/cursor.html#Cursor.statement
        # The DB API definition does not define this attribute.
        statement = cursor.statement
        if statement and six.PY2 and not isinstance(statement, unicode):
            statement = statement.decode('utf-8')
        # Unlike Psycopg's `query` and MySQLdb`'s `_last_executed`, CxOracle's
        # `statement` doesn't contain the query parameters. refs #20010.
        return super(DatabaseOperations, self).last_executed_query(cursor, statement, params)
def last_insert_id(self, cursor, table_name, pk_name):
sq_name = self._get_sequence_name(table_name)
cursor.execute('SELECT "%s".currval FROM dual' % sq_name)
return cursor.fetchone()[0]
def lookup_cast(self, lookup_type):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
    def max_in_list_size(self):
        """Oracle allows at most 1000 items in an IN (...) list."""
        return 1000
    def max_name_length(self):
        """Identifiers are truncated to 30 characters (see quote_name)."""
        return 30
    def prep_for_iexact_query(self, x):
        """No preprocessing needed: lookup_cast() already applies UPPER()."""
        return x
def process_clob(self, value):
if value is None:
return ''
return force_text(value.read())
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
# not quoted, Oracle has case-insensitive behavior for identifiers, but
# always defaults to uppercase.
# We simplify things by making Oracle identifiers always uppercase.
if not name.startswith('"') and not name.endswith('"'):
name = '"%s"' % util.truncate_name(name.upper(),
self.max_name_length())
# Oracle puts the query text into a (query % args) construct, so % signs
# in names need to be escaped. The '%%' will be collapsed back to '%' at
# that stage so we aren't really making the name longer here.
name = name.replace('%','%%')
return name.upper()
    def random_function_sql(self):
        """SQL function used for random ordering."""
        return "DBMS_RANDOM.RANDOM"
    def regex_lookup_9(self, lookup_type):
        """Oracle 9i variant: regex lookups are unsupported before 10g."""
        raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
def regex_lookup_10(self, lookup_typ | e):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
    def regex_lookup(self, lookup_type):
        """
        Version-dependent regex lookup dispatcher.

        Creating a cursor initializes the connection, which presumably rebinds
        ``ops.regex_lookup`` to the 9i or 10g implementation; we then delegate
        to whichever was installed.  (NOTE(review): confirm the rebinding
        happens during connection init elsewhere in this backend.)
        """
        # If regex_lookup is called before it's been initialized, then create
        # a cursor to initialize it and recur.
        self.connection.cursor()
        return self.connection.ops.regex_lookup(lookup_type)
    def return_insert_id(self):
        """SQL suffix (and bind params) that captures the pk of an INSERT."""
        return "RETURNING %s INTO %%s", (InsertIdVar(),)
    def savepoint_create_sql(self, sid):
        """Return the SAVEPOINT statement for savepoint id ``sid``."""
        return convert_unicode("SAVEPOINT " + self.quote_name(sid))
    def savepoint_rollback_sql(self, sid):
        """Return the ROLLBACK TO SAVEPOINT statement for savepoint id ``sid``."""
        return convert_unicode("ROLLBACK TO SAVEPOINT " + self.quote_name(sid))
def sql_flush(self, style, tables, sequences, allow_cascade=False):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
if tables:
# Oracle does support TRUNCATE, but it seems to get us into
# FK referential trouble, whereas DELETE FROM table works.
sql = ['%s %s %s;' % (
style.SQL_KEYWORD('DELETE'),
style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table))
) for table in tables]
# Since we've just deleted all the rows, running our sequence
# ALTER code will reset the sequence to 0.
sql.extend(self.sequence_reset_by_name_sql(style, sequences))
return sql
else:
return []
def sequence_reset_by_name_sql(self, style, sequences):
sql = []
for sequence_info in sequences:
sequence_name = self._get_sequence_name(sequence_info['table'])
table_name = self.quote_name(sequence_info['table'])
column_name = self.quote_name(sequence_info['column'] or 'id')
query = _get_sequence_reset_sql() % {'sequence': sequence_name,
'table': table_name,
|
"""
Import prescribing data from CSV files into SQLite
"""
from collections import namedtuple
import csv
from itertools import groupby
import logging
import os
import sqlite3
import gzip
import heapq
from matrixstore.matrix_ops import sparse_matrix, finalise_matrix
from matrixstore.serializer import serialize_compressed
from .common import get_prescribing_filename
logger = logging.getLogger(__name__)
# One output row: a BNF code plus one matrix per prescribing measure.
MatrixRow = namedtuple("MatrixRow", "bnf_code items quantity actual_cost net_cost")
class MissingHeaderError(Exception):
    """Raised when a prescribing CSV lacks one of the expected column headers."""
    pass
def import_prescribing(filename):
    """
    Load prescribing data for every date listed in the SQLite file at
    *filename* and write the aggregated matrices back into it.
    """
    if not os.path.exists(filename):
        raise RuntimeError("No SQLite file at: {}".format(filename))
    connection = sqlite3.connect(filename)
    # Trade crash-safety for insert speed
    connection.execute("PRAGMA synchronous=OFF")
    dates = [date for (date,) in connection.execute("SELECT date FROM date")]
    prescriptions = get_prescriptions_for_dates(dates)
    write_prescribing(connection, prescriptions)
    connection.commit()
    connection.close()
def write_pres | cribing(connection, prescriptions):
cursor = connection.cursor()
# Map practice codes and date strings to their corresponding row/column
# offset in the matrix
practices = dict(cursor.execute("SELECT code, offset FROM practice"))
dates = dict(cursor.execute("SELECT date, offset FROM date"))
matrices = build_matrices(prescriptions, practices, dates)
rows = format_as_sql_rows(matric | es, connection)
cursor.executemany(
"""
UPDATE presentation SET items=?, quantity=?, actual_cost=?, net_cost=?
WHERE bnf_code=?
""",
rows,
)
def get_prescriptions_for_dates(dates):
    """
    Yield all prescribing data for the given dates as tuples of the form:
        bnf_code, practice_code, date, items, quantity, actual_cost, net_cost
    sorted by bnf_code, practice and date.
    """
    filenames = [get_prescribing_filename(date) for date in sorted(dates)]
    missing_files = [name for name in filenames if not os.path.exists(name)]
    if missing_files:
        raise RuntimeError(
            "Some required CSV files were missing:\n  {}".format(
                "\n  ".join(missing_files)
            )
        )
    # Each per-file stream is already sorted by (bnf_code, practice, month),
    # so merging them correctly yields one combined sorted stream — which
    # heapq.merge handles lazily for us.
    streams = [read_gzipped_prescribing_csv(name) for name in filenames]
    return heapq.merge(*streams)
def read_gzipped_prescribing_csv(filename):
    """Open a gzipped prescribing CSV file and yield its parsed rows."""
    with gzip.open(filename, "rt") as handle:
        for parsed_row in parse_prescribing_csv(handle):
            yield parsed_row
def parse_prescribing_csv(input_stream):
    """
    Accepts a stream of CSV and yields prescribing data as tuples of the form:
        bnf_code, practice_code, date, items, quantity, actual_cost, net_cost
    Raises MissingHeaderError if an expected column header is absent.
    """
    reader = csv.reader(input_stream)
    header = next(reader)
    try:
        column_indices = [
            header.index(column)
            for column in (
                "bnf_code", "practice", "month", "items",
                "quantity", "actual_cost", "net_cost",
            )
        ]
    except ValueError as err:
        raise MissingHeaderError(str(err))
    bnf, practice, month, items, quantity, actual, net = column_indices
    for record in reader:
        yield (
            # These sometimes have trailing spaces in the CSV
            record[bnf].strip(),
            record[practice].strip(),
            # We only need the YYYY-MM-DD part of the date
            record[month][:10],
            int(record[items]),
            float(record[quantity]),
            pounds_to_pence(record[actual]),
            pounds_to_pence(record[net]),
        )
def pounds_to_pence(value):
    """Convert a pounds amount (string or number) to an integer pence count."""
    pence = float(value) * 100
    return int(round(pence))
def build_matrices(prescriptions, practices, dates):
    """
    Accepts an iterable of prescriptions plus mappings of practice codes and
    date strings to their respective row/column offsets. Yields tuples of the
    form:
        bnf_code, items_matrix, quantity_matrix, actual_cost_matrix, net_cost_matrix
    Where the matrices contain the prescribed values for that presentation for
    every practice and date.
    """
    max_row = max(practices.values())
    max_col = max(dates.values())
    shape = (max_row + 1, max_col + 1)
    # Relies on the input stream being sorted by bnf_code (see
    # get_prescriptions_for_dates): groupby only groups adjacent rows.
    grouped_by_bnf_code = groupby(prescriptions, lambda row: row[0])
    for bnf_code, row_group in grouped_by_bnf_code:
        # Costs are integer pence; quantity can be fractional.
        items_matrix = sparse_matrix(shape, integer=True)
        quantity_matrix = sparse_matrix(shape, integer=False)
        actual_cost_matrix = sparse_matrix(shape, integer=True)
        net_cost_matrix = sparse_matrix(shape, integer=True)
        for _, practice, date, items, quantity, actual_cost, net_cost in row_group:
            practice_offset = practices[practice]
            date_offset = dates[date]
            items_matrix[practice_offset, date_offset] = items
            quantity_matrix[practice_offset, date_offset] = quantity
            actual_cost_matrix[practice_offset, date_offset] = actual_cost
            net_cost_matrix[practice_offset, date_offset] = net_cost
        yield MatrixRow(
            bnf_code,
            finalise_matrix(items_matrix),
            finalise_matrix(quantity_matrix),
            finalise_matrix(actual_cost_matrix),
            finalise_matrix(net_cost_matrix),
        )
def format_as_sql_rows(matrices, connection):
    """
    Given an iterable of MatrixRows (which contain a BNF code plus all
    prescribing data for that presentation) yield tuples of values ready for
    insertion into SQLite
    """
    cursor = connection.cursor()
    num_presentations = next(cursor.execute("SELECT COUNT(*) FROM presentation"))[0]
    count = 0
    for row in matrices:
        count += 1
        # We make sure we have a row for every BNF code in the data, even ones
        # we didn't know about previously. This is a hack that we won't need
        # once we can use SQLite v3.24.0 which has proper UPSERT support.
        cursor.execute(
            "INSERT OR IGNORE INTO presentation (bnf_code) VALUES (?)", [row.bnf_code]
        )
        if should_log_message(count):
            logger.info(
                "Writing data for %s (%s/%s)", row.bnf_code, count, num_presentations
            )
        # bnf_code comes last to line up with the UPDATE's WHERE clause.
        yield (
            serialize_compressed(row.items),
            serialize_compressed(row.quantity),
            serialize_compressed(row.actual_cost),
            serialize_compressed(row.net_cost),
            row.bnf_code,
        )
    logger.info("Finished writing data for %s presentations", count)
def should_log_message(n):
    """
    To avoid cluttering log output we don't log the insertion of every single
    presentation: log the first ten, the hundredth, then every 200th.
    """
    return n <= 10 or n == 100 or n % 200 == 0
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from contextable import Contextable
class Terms(Contextable):
    """
    Payment terms for the invoice: a display string plus the number of days
    until payment is due.
    NOTE(review): the previous docstring ("An address in the invoice") looks
    like a copy/paste error from a sibling class.
    """
    def __init__(self, days, string):
        # days: payment window; string: human-readable terms text.
        self.days = days
        self.string = string
    def context(self):
        """Return the template context fragment exposing these terms."""
        return {
            'terms': {
                'string': self.string,
                'days': self.days
            }
        }
|
from django.db import models
from django.db.models import Q
class FriendshipManager(models.Manager):
    """
    Provides an interface to friends
    """
    def friends_for_user(self, user):
        """
        Returns friends for specific user
        """
        friends = []
        # NOTE(review): select_related(depth=1) was removed in Django 1.8;
        # this code targets an older Django.
        qs = self.filter(Q(from_user=user) | Q(to_user=user)).select_related(depth=1)
        for friendship in qs:
            # A friendship row involves the user on either end; collect
            # whichever party is not `user`.
            if friendship.from_user == user:
                friends.append(friendship.to_user)
            else:
                friends.append(friendship.from_user)
        return friends
    def are_friends(self, user1, user2):
        """
        Returns boolean value of whether user1 and user2 are currently friends.
        """
        return self.filter(
            Q(from_user=user1, to_user=user2) |
            Q(from_user=user2, to_user=user1)
        ).count() > 0
    def remove(self, user1, user2):
        """
        Removes specific user from another specific users friends list.
        """
        # Friendship rows may be stored in either direction; try both.
        friendships = self.filter(from_user=user1, to_user=user2)
        if not friendships:
            friendships = self.filter(from_user=user2, to_user=user1)
        if friendships:
            friendships.delete()
class FriendshipInvitationManager(models.Manager):
    """
    Provides an interface to friendship invitations
    """
    def is_invited(self, user1, user2):
        """
        Returns boolean value of whether user1 has been invited to a friendship by user2
        """
        # Checks both directions, so this is really "is there any pending
        # invitation between the two users".
        return self.filter(
            Q(from_user=user1, to_user=user2) |
            Q(from_user=user2, to_user=user1)
        ).count() > 0
    def remove(self, user1, user2):
        """
        Removes friendship request from user1 to user2.
        """
        # Invitation rows may be stored in either direction; try both.
        invitations = self.filter(from_user=user1, to_user=user2)
        if not invitations:
            invitations = self.filter(from_user=user2, to_user=user1)
        if invitations:
            invitations.delete()
class BlockingManager(models.Manager):
    """
    Provides an interface to blocked users for all users.
    """
    def blocked_for_user(self, user):
        """
        Returns users that the specified user has blocked (rows where
        ``user`` is from_user).
        """
        blocked = []
        # NOTE(review): select_related(depth=1) was removed in Django 1.8.
        qs = self.filter(from_user=user).select_related(depth=1)
        for blocking in qs:
            blocked.append(blocking.to_user)
        return blocked
|
urn ident
def uvf1_download(radio):
    """Download from TYT TH-UVF1"""
    # Identify the radio first; the ident bytes form the start of the image.
    data = uvf1_identify(radio)
    for i in range(0, 0x1000, 0x10):
        # Request 0x10 bytes at address i: 'R', 16-bit address, block length.
        msg = struct.pack(">BHB", ord("R"), i, 0x10)
        radio.pipe.write(msg)
        block = radio.pipe.read(0x10 + 4)
        if len(block) != (0x10 + 4):
            raise errors.RadioError("Radio sent a short block")
        # ACK the block; the radio must ACK back with 0x06.
        radio.pipe.write("\x06")
        ack = radio.pipe.read(1)
        if ack != "\x06":
            raise errors.RadioError("Radio NAKed block")
        # First 4 bytes of the reply are a header; the payload follows.
        data += block[4:]
        status = chirp_common.Status()
        status.cur = i
        status.max = 0x1000
        status.msg = "Cloning from radio"
        radio.status_fn(status)
    # End-of-clone command.
    radio.pipe.write("\x45")
    return memmap.MemoryMap(data)
def uvf1_upload(radio):
    """Upload to TYT TH-UVF1"""
    # Re-identify and verify this image came from the same model: the first
    # 16 bytes of the mmap are the ident block.
    data = uvf1_identify(radio)
    radio.pipe.timeout = 1
    if data != radio._mmap[:16]:
        raise errors.RadioError("Unable to talk to this model")
    for i in range(0, 0x1000, 0x10):
        # Skip the 16-byte ident header when reading from the stored image.
        addr = i + 0x10
        msg = struct.pack(">BHB", ord("W"), i, 0x10)
        msg += radio._mmap[addr:addr+0x10]
        radio.pipe.write(msg)
        ack = radio.pipe.read(1)
        if ack != "\x06":
            LOG.debug(repr(ack))
            raise errors.RadioError("Radio did not ack block %i" % i)
        status = chirp_common.Status()
        status.cur = i
        status.max = 0x1000
        status.msg = "Cloning to radio"
        radio.status_fn(status)
    # End of clone?
    radio.pipe.write("\x45")
THUV1F_MEM_FORMAT = """
struct mem {
bbcd rx_freq[4];
bbcd tx_freq[4];
lbcd rx_tone[2];
lbcd tx_tone[2];
u8 unknown1:1,
pttid:2,
unknown2:2,
ishighpower:1,
unknown3:2;
u8 unknown4:4,
isnarrow:1,
vox:1,
bcl:2;
u8 unknown5:1,
scan:1,
unknown6:3,
scramble_code:3;
u8 unknown7;
};
struct name {
char name[7];
};
#seekto 0x0020;
struct mem memory[128];
#seekto 0x0840;
struct {
u8 scans:2,
autolk:1,
unknown1:5;
u8 light:2,
unknown6:2,
disnm:1,
voice:1,
beep:1,
rxsave:1;
u8 led:2,
unknown5:3,
ani:1,
roger:1,
dw:1;
u8 opnmsg:2,
unknown4:1,
dwait:1,
unknown9:4;
u8 squelch;
u8 unknown2:4,
tot:4;
u8 unknown3:4,
vox_level:4;
u8 pad[10];
char ponmsg[6];
} settings;
#seekto 0x08D0;
struct name names[128];
"""
LED_LIST = ["Off", "On", "Auto"]
LIGHT_LIST = ["Purple", "Orange", "Blue"]
VOX_LIST = ["1", "2", "3", "4", "5", "6", "7", "8"]
TOT_LIST = ["Off", "30s", "60s", "90s", "120s", "150s", "180s", "210s",
"240s", "270s"]
SCANS_LIST = ["Time", "Carry", "Seek"]
OPNMSG_LIST = ["Off", "DC", "Message"]
POWER_LEVELS = [chirp_common.PowerLevel("High", watts=5),
chirp_common.PowerLevel("Low", watts=1),
]
PTTID_LIST = ["Off", "BOT", "EOT", "Both"]
BCL_LIST = ["Off", "CSQ", "QT/DQT"]
CODES_LIST = [x for x in range(1, 9)]
@directory.register
class TYTTHUVF1Radio(chirp_common.CloneModeRadio):
"""TYT TH-UVF1"""
VENDOR = "TYT"
MODEL = "TH-UVF1"
    def get_features(self):
        """Describe this radio's capabilities to the CHIRP UI."""
        rf = chirp_common.RadioFeatures()
        rf.memory_bounds = (1, 128)
        rf.has_bank = False
        rf.has_ctone = True
        rf.has_tuning_step = False
        rf.has_cross = True
        rf.has_rx_dtcs = True
        rf.has_settings = True
        rf.can_odd_split = True
        rf.valid_duplexes = ["", "-", "+", "split", "off"]
        rf.valid_tmodes = ["", "Tone", "TSQL", "DTCS", "Cross"]
        rf.valid_characters = chirp_common.CHARSET_UPPER_NUMERIC + "-"
        # VHF and UHF bands, in Hz.
        rf.valid_bands = [(136000000, 174000000),
                          (420000000, 470000000)]
        rf.valid_skips = ["", "S"]
        rf.valid_power_levels = POWER_LEVELS
        rf.valid_modes = ["FM", "NFM"]
        rf.valid_name_length = 7
        rf.valid_cross_modes = ["Tone->Tone", "DTCS->DTCS",
                                "Tone->DTCS", "DTCS->Tone",
                                "->Tone", "->DTCS", "DTCS->"]
        return rf
    def sync_in(self):
        """Download the memory image from the radio and parse it."""
        try:
            self._mmap = uvf1_download(self)
        except errors.RadioError:
            raise
        except Exception, e:
            # Wrap unexpected failures so the UI shows a RadioError.
            raise errors.RadioError("Failed to communicate with radio: %s" % e)
        self.process_mmap()
    def sync_out(self):
        """Upload the current memory image to the radio."""
        try:
            uvf1_upload(self)
        except errors.RadioError:
            raise
        except Exception, e:
            # Wrap unexpected failures so the UI shows a RadioError.
            raise errors.RadioError("Failed to communicate with radio: %s" % e)
    @classmethod
    def match_model(cls, filedata, filename):
        """Detect a TH-UVF1 image file by its leading ident bytes."""
        # TYT TH-UVF1 original
        if filedata.startswith("\x13\x60\x17\x40\x40\x00\x48\x00" +
                               "\x35\x00\x39\x00\x47\x00\x52\x00"):
            return True
        # TYT TH-UVF1 V2
        elif filedata.startswith("\x14\x40\x14\x80\x43\x00\x45\x00" +
                                 "\x13\x60\x17\x40\x40\x00\x47\x00"):
            return True
        else:
            return False
    def process_mmap(self):
        """Parse the raw memory map into structured fields."""
        self._memobj = bitwise.parse(THUV1F_MEM_FORMAT, self._mmap)
def _decode_tone(self, t | oneval):
pol = "N"
rawval = (toneval[1].get_bits(0xFF) << 8) | toneval[0].get_bits(0xFF)
if toneval[0].get_bits(0xFF) == 0xFF:
mode = ""
val = 0
elif toneval[1].get_bits(0xC0) == 0xC0:
mode = "DTCS"
| val = int("%x" % (rawval & 0x3FFF))
pol = "R"
elif toneval[1].get_bits(0x80):
mode = "DTCS"
val = int("%x" % (rawval & 0x3FFF))
else:
mode = "Tone"
val = int(toneval) / 10.0
return mode, val, pol
    def _encode_tone(self, _toneval, mode, val, pol):
        """
        Encode (mode, val, pol) into the radio's 2-byte tone field — the
        inverse of _decode_tone().  0xFFFF means "no tone".
        """
        toneval = 0
        if mode == "Tone":
            # CTCSS frequency stored BCD-coded in tenths of Hz.
            toneval = int("%i" % (val * 10), 16)
        elif mode == "DTCS":
            # DTCS code BCD-coded; bit 15 marks DTCS, bit 14 reverse polarity.
            toneval = int("%i" % val, 16)
            toneval |= 0x8000
            if pol == "R":
                toneval |= 0x4000
        else:
            toneval = 0xFFFF
        # Write back little-endian: low byte first.
        _toneval[0].set_raw(toneval & 0xFF)
        _toneval[1].set_raw((toneval >> 8) & 0xFF)
    def get_raw_memory(self, number):
        """Return a debug representation of memory ``number`` (1-based)."""
        return repr(self._memobj.memory[number - 1])
def _is_txinh(self, _mem):
raw_tx = ""
for i in range(0, 4):
raw_tx += _mem.tx_freq[i].get_raw()
return raw_tx == "\xFF\xFF\xFF\xFF"
def get_memory(self, number):
_mem = self._memobj.memory[number - 1]
mem = chirp_common.Memory()
mem.number = number
if _mem.get_raw().startswith("\xFF\xFF\xFF\xFF"):
mem.empty = True
return mem
mem.freq = int(_mem.rx_freq) * 10
txfreq = int(_mem.tx_freq) * 10
if self._is_txinh(_mem):
mem.duplex = "off"
mem.offset = 0
elif txfreq == mem.freq:
mem.duplex = ""
elif abs(txfreq - mem.freq) > 70000000:
mem.duplex = "split"
mem.offset = txfreq
elif txfreq < mem.freq:
mem.duplex = "-"
mem.offset = mem.freq - txfreq
elif txfreq > mem.freq:
mem.duplex = "+"
mem.offset = txfreq - mem.freq
txmode, txval, txpol = self._decode_tone(_mem.tx_tone)
rxmode, rxval, rxpol = self._decode_tone(_mem.rx_tone)
chirp_common.split_tone_decode(
mem, (txmode, txval, txpol), (rxmode, rxval, rxpol))
mem.name = str(self._memobj.names[number - 1].name)
mem.name = mem.name.replace("\xFF", " ").rstrip()
mem.skip = not _mem.scan and "S" or ""
mem.mode = _mem.isnarrow and "NFM" or "FM"
mem.power = POWER_LEVELS[1 - _mem.ishighpower]
mem.extra = RadioSettingGroup("extra", "Extra Settings")
rs = RadioSetting("pttid", "PTT ID",
RadioSettingValueList(PTTID_LIST,
PTTID_LIST[_mem.pttid]))
mem.extra.append(rs)
rs = RadioSetting("vox", "VOX",
RadioSettingValueBoolean(_mem.vox))
|
from django.test import TestCase
from django.core.exceptions import FieldError
from models import User, Poll, Choice
class ReverseLookupTests(TestCase):
    """Reverse foreign-key lookups via default and custom related names."""
    def setUp(self):
        # Two users with one poll each, and a single choice row that links
        # both polls (via `poll` and `related_poll`).
        john = User.objects.create(name="John Doe")
        jim = User.objects.create(name="Jim Bo")
        first_poll = Poll.objects.create(
            question="What's the first question?",
            creator=john
        )
        second_poll = Poll.objects.create(
            question="What's the second question?",
            creator=jim
        )
        new_choice = Choice.objects.create(
            poll=first_poll,
            related_poll=second_poll,
            name="This is the answer."
        )
    def test_reverse_by_field(self):
        # Default reverse lookup name is the lowercased model name ("poll").
        u1 = User.objects.get(
            poll__question__exact="What's the first question?"
        )
        self.assertEqual(u1.name, "John Doe")
        u2 = User.objects.get(
            poll__question__exact="What's the second question?"
        )
        self.assertEqual(u2.name, "Jim Bo")
    def test_reverse_by_related_name(self):
        # These rely on related_name values declared on Choice's FK fields
        # (presumably "poll_choice" and "related_choice" in models.py).
        p1 = Poll.objects.get(poll_choice__name__exact="This is the answer.")
        self.assertEqual(p1.question, "What's the first question?")
        p2 = Poll.objects.get(
            related_choice__name__exact="This is the answer.")
        self.assertEqual(p2.question, "What's the second question?")
    def test_reverse_field_name_disallowed(self):
        """
        If a related_name is given you can't use the field name instead
        """
        self.assertRaises(FieldError, Poll.objects.get,
            choice__name__exact="This is the answer")
|
#!/usr/bin/env python
# update-dependencies-bad.py - Fails on bad.swift -*- python -*-
#
# This source file is part of the Swift.org open source projec | t
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ----------------------------------------------------------------------------
#
# Fails if the input file is named "bad.swift" or "crash.swift"; otherwise
# dispatches to update-dependencie | s.py. "crash.swift" gives an exit code
# other than 1.
#
# ----------------------------------------------------------------------------
from __future__ import print_function
import os
import shutil
import sys
assert sys.argv[1] == '-frontend'
# The file after -primary-file is the one this frontend invocation compiles.
primaryFile = sys.argv[sys.argv.index('-primary-file') + 1]
if (os.path.basename(primaryFile) == 'bad.swift' or
    os.path.basename(primaryFile) == 'crash.swift'):
    print("Handled", os.path.basename(primaryFile))
    # Replace the dependencies file with the input file.
    try:
        depsFile = sys.argv[sys.argv.index(
            '-emit-reference-dependencies-path') + 1]
        shutil.copyfile(primaryFile, depsFile)
    except ValueError:
        # No -emit-reference-dependencies-path flag was given; nothing to copy.
        pass
    if os.path.basename(primaryFile) == 'bad.swift':
        sys.exit(1)
    else:
        # "crash.swift" must exit with a code other than 1 (simulated crash).
        sys.exit(129)
# Otherwise defer to the regular update-dependencies.py next to this script.
# NOTE(review): execfile is Python 2 only — this helper presumably runs
# under the build's Python 2 interpreter.
execDir = os.path.dirname(os.path.abspath(__file__))
execfile(os.path.join(execDir, "update-dependencies.py"))
|
# -*- | coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Replaces StateRequirement's manual `position` field with declarative
    # ordering (-state, id) and enforces unique (state, text) pairs.
    dependencies = [
        ('myhpom', '0012_auto_20180718_1140'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='staterequirement',
            options={'ordering': ['-state', 'id']},
        ),
        migrations.AlterUniqueTogether(
            name='staterequirement',
            unique_together=set([('state', 'text')]),
        ),
        migrations.RemoveField(
            model_name='staterequirement',
            name='position',
        ),
    ]
|
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Tavendo GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION W | ITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
import json
import random
import sys
from multiprocessing import Process
# from autobahn.asyncio.websocket import WebSocketClientProtocol, \
# WebSocketCl | ientFactory
from autobahn.twisted.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
# import trollius
class MyClientProtocol(WebSocketClientProtocol):
    """
    Random-move tic-tac-toe bot client (Python 2): pairs with an opponent
    via the server's JSON protocol and re-sends "ready" after each game.
    """
    def __init__(self, *args, **kwargs):
        # handle: our server-assigned id; pair_handle: opponent's id;
        # open_positions: semicolon-separated valid moves; my_move: turn flag.
        self.handle = ''
        self.pair_handle = ''
        self.open_positions = ''
        self.my_move = False
        super(MyClientProtocol, self).__init__(*args, **kwargs)
    def onConnect(self, response):
        print("Server connected: {0}".format(response.peer))
    # @trollius.coroutine
    def onOpen(self):
        print("WebSocket connection open.")
        # start sending messages every second ..
    def onMessage(self, payload, isBinary):
        # Dispatch on the server's JSON "action" field.
        data = json.loads(payload)
        print data
        if data['action'] == 'connect':
            # Server assigned our handle; announce we are ready to play.
            self.handle = data['handle']
            data = {}
            data['action'] = 'ready'
            data['handle'] = self.handle
            self.sendMessage(json.dumps(data))
            # yield trollius.sleep(1)
        elif data['action'] == 'paired':
            self.pair_handle = data['pair']
        elif data['action'] == 'game-start':
            if data['next_handle'] == self.handle:
                self.my_move = True
            else:
                self.my_move = False
            self.open_positions = data['valid-moves']
        elif data['action'] == 'valid-moves':
            if data['next_handle'] == self.handle:
                self.open_positions = data['valid-moves']
                self.my_move = True
        elif data['action'] == 'player-move':
            pass
        elif data['action'] == 'game-end':
            print 'My Handle: ', self.handle, 'Pair Handle: ', self.pair_handle, 'Result: ', data['result'], ' : ', data['win_handle']
            # Game Over
            self.my_move = False
            data = {}
            data['action'] = 'ready'
            data['handle'] = self.handle
            self.sendMessage(json.dumps(data))
            # yield trollius.sleep(1)
        if self.my_move:
            # select a piece to move
            self.my_move = False
            data = {}
            data['action'] = 'player-move'
            data['handle'] = self.handle
            data['move'] = random.choice(self.open_positions.split(';'))
            self.sendMessage(json.dumps(data))
            # yield trollius.sleep(1)
    def onClose(self, wasClean, code, reason):
        print("WebSocket connection closed: {0}".format(reason))
def main_async(worker_numb):
    """
    Trollius/asyncio variant of main().

    NOTE(review): the trollius import at the top of the file is commented
    out, so calling this raises NameError on `trollius` — apparently dead
    code kept for reference.
    """
    print sys.argv
    if len(sys.argv) > 1 and sys.argv[1] == 'local':
        ws_host = '127.0.0.1'
        ws_port = 8001
    else:
        ws_host = "websocket-ha-test.ovlizj.0001.usw1.cache.amazonaws.com"
        ws_port = 80
    # + ':' + str(ws_port) +
    ws_host_url = 'ws://' + ws_host + ':' + str(ws_port) + '/tic-tac-toe/'
    factory = WebSocketClientFactory(ws_host_url, debug=False)
    factory.protocol = MyClientProtocol
    loop = trollius.get_event_loop()
    coro = loop.create_connection(factory, ws_host, ws_port)
    loop.run_until_complete(coro)
    loop.run_forever()
    loop.close()
def main(worker_numb):
    """Run one game client on the twisted reactor.

    worker_numb: index of the worker process (unused; present so the
    Process(target=main, args=(x,)) call signature works).

    Connects to the local server when 'local' is passed on the command
    line, otherwise to the AWS test host.  Blocks in reactor.run().
    Fix: 'print sys.argv' was Python-2-only syntax.
    """
    from twisted.python import log
    from twisted.internet import reactor
    log.startLogging(sys.stdout)
    print(sys.argv)
    if len(sys.argv) > 1 and sys.argv[1] == 'local':
        ws_host = '127.0.0.1'
        ws_port = 8001
    else:
        ws_host = "websocket-ha-test.ovlizj.0001.usw1.cache.amazonaws.com"
        ws_port = 80
    ws_host_url = 'ws://' + ws_host + ':' + str(ws_port) + '/tic-tac-toe/'
    factory = WebSocketClientFactory(ws_host_url, debug=False)
    factory.protocol = MyClientProtocol
    reactor.connectTCP(ws_host, ws_port, factory)
    reactor.run()
if __name__ == '__main__':
    # Setup a list of processes that we want to run
    # (four independent game-client workers, each running main(x)).
    processes = [Process(target=main, args=(x,)) for x in range(4)]
    # Run processes
    for p in processes:
        p.start()
    # Exit the completed processes
    # (block until every worker terminates before the parent exits).
    for p in processes:
        p.join()
|
% (offset, arg_name )
else:
if is_swapped:
if arg_type == "GLfloat" or arg_type == "GLclampf":
retval = "\tWRITE_DATA( %d, GLuint, SWAPFLOAT(%s) );" % (offset, arg_name)
elif arg_type == "GLdouble" or arg_type == "GLclampd":
retval = "\tWRITE_SWAPPED_DOUBLE( %d, %s );" % (offset, arg_name)
elif apiutil.sizeof(arg_type) == 1:
retval = "\tWRITE_DATA( %d, %s, %s );" % (offset, arg_type, arg_name)
elif apiutil.sizeof(arg_type) == 2:
retval = "\tWRITE_DATA( %d, %s, SWAP16(%s) );" % (offset, arg_type, arg_name)
elif apiutil.sizeof(arg_type) == 4:
retval = "\tWRITE_DATA( %d, %s, SWAP32(%s) );" % (offset, arg_type, arg_name)
else:
if arg_type == "GLdouble" or arg_type == "GLclampd":
retval = "\tWRITE_DOUBLE( %d, %s );" % (offset, arg_name)
else:
retval = "\tWRITE_DATA( %d, %s, %s );" % (offset, arg_type, arg_name)
if retval == 9:
print >>sys.stderr, "no retval for %s %s" % (arg_name, arg_type)
assert 0
return retval
def UpdateCurrentPointer( func_name ):
    """Emit C code (to stdout) recording where the just-packed value for a
    GL 'current state' function (glColor*, glNormal*, glTexCoord*, glIndex*,
    glEdgeFlag, glFogCoord*, glVertexAttrib*) lives in the packing buffer,
    so the packer can patch it later.  Prints nothing for other functions.

    Fix: the original used Python-2-only print statements; single-argument
    print() calls behave identically under Python 2 and Python 3.
    """
    m = re.search( r"^(Color|Normal)([1234])(ub|b|us|s|ui|i|f|d)$", func_name )
    if m:
        k = m.group(1)
        name = '%s%s' % (k[:1].lower(), k[1:])
        type = m.group(3) + m.group(2)
        print("\tpc->current.c.%s.%s = data_ptr;" % (name, type))
        return
    m = re.search( r"^(SecondaryColor)(3)(ub|b|us|s|ui|i|f|d)EXT$", func_name )
    if m:
        name = 'secondaryColor'
        type = m.group(3) + m.group(2)
        print("\tpc->current.c.%s.%s = data_ptr;" % (name, type))
        return
    m = re.search( r"^(TexCoord)([1234])(ub|b|us|s|ui|i|f|d)$", func_name )
    if m:
        name = 'texCoord'
        type = m.group(3) + m.group(2)
        print("\tpc->current.c.%s.%s[0] = data_ptr;" % (name, type))
        return
    m = re.search( r"^(MultiTexCoord)([1234])(ub|b|us|s|ui|i|f|d)ARB$", func_name )
    if m:
        name = 'texCoord'
        type = m.group(3) + m.group(2)
        # +4 skips the texture-unit field that precedes the coordinates.
        print("\tpc->current.c.%s.%s[texture-GL_TEXTURE0_ARB] = data_ptr + 4;" % (name, type))
        return
    m = re.match( r"^(Index)(ub|b|us|s|ui|i|f|d)$", func_name )
    if m:
        name = 'index'
        type = m.group(2) + "1"
        print("\tpc->current.c.%s.%s = data_ptr;" % (name, type))
        return
    m = re.match( r"^(EdgeFlag)$", func_name )
    if m:
        name = 'edgeFlag'
        type = "l1"
        print("\tpc->current.c.%s.%s = data_ptr;" % (name, type))
        return
    m = re.match( r"^(FogCoord)(f|d)EXT$", func_name )
    if m:
        name = 'fogCoord'
        type = m.group(2) + "1"
        print("\tpc->current.c.%s.%s = data_ptr;" % (name, type))
        return
    m = re.search( r"^(VertexAttrib)([1234])N?(ub|b|s|f|d)(NV|ARB)$", func_name )
    if m:
        name = 'vertexAttrib'
        type = m.group(3) + m.group(2)
        # Add 12 to skip the packet length, opcode and index fields
        print("\tpc->current.c.%s.%s[index] = data_ptr + 12;" % (name, type))
        if m.group(4) == "ARB" or m.group(4) == "NV":
            print("\tpc->current.attribsUsedMask |= (1 << index);")
        return
def PrintFunc( func_name, params, is_swapped, can_have_pointers ):
    """Emit a packer function (C source on stdout) for one GL entry point.

    func_name: GL function name without the 'gl' prefix
    params: list of (name, type, vecSize) tuples for the function
    is_swapped: emit the byte-swapped (crPack...SWAP) variant
    can_have_pointers: allow raw pointer parameters without bailing out
    """
    if is_swapped:
        print 'void PACK_APIENTRY crPack%sSWAP( %s )' % (func_name, apiutil.MakeDeclarationString(params))
    else:
        print 'void PACK_APIENTRY crPack%s( %s )' % (func_name, apiutil.MakeDeclarationString(params))
    print '{'
    print '\tCR_GET_PACKER_CONTEXT(pc);'
    # Save original function name
    orig_func_name = func_name
    # Convert to a non-vector version of the function if possible
    func_name = apiutil.NonVectorFunction( func_name )
    if not func_name:
        func_name = orig_func_name
    # Check if there are any pointer parameters.
    # That's usually a problem so we'll emit an error function.
    nonVecParams = apiutil.Parameters(func_name)
    bail_out = 0
    for (name, type, vecSize) in nonVecParams:
        if apiutil.IsPointer(type) and vecSize == 0 and not can_have_pointers:
            bail_out = 1
    if bail_out:
        # Emit a stub that only reports the problem at runtime.
        # NOTE(review): vecSize here is the value leaked from the loop
        # above (last parameter) — confirm that is intended.
        for (name, type, vecSize) in nonVecParams:
            print '\t(void)%s;' % (name)
        print '\tcrError ( "%s needs to be special cased %d %d!");' % (func_name, vecSize, can_have_pointers)
        print '\t(void) pc;'
        print '}'
        # XXX we should really abort here
        return
    # Extended opcodes get an 8-byte (length + opcode) sub-header.
    if "extpack" in apiutil.ChromiumProps(func_name):
        is_extended = 1
    else:
        is_extended = 0
    print "\tunsigned char *data_ptr;"
    print '\t(void) pc;'
    #if func_name == "Enable" or func_name == "Disable":
    #    print "\tCRASSERT(!pc->buffer.geometry_only); /* sanity check */"
    # For vector functions converted to non-vector form, guard against a
    # NULL vector argument before dereferencing it below.
    for index in range(0,len(params)):
        (name, type, vecSize) = params[index]
        if vecSize>0 and func_name!=orig_func_name:
            print " if (!%s) {" % name
            # Know the reason for this one, so avoid the spam.
            if orig_func_name != "SecondaryColor3fvEXT":
                print " crDebug(\"App passed NULL as %s for %s\");" % (name, orig_func_name)
            print " return;"
            print " }"
    packet_length = apiutil.PacketLength(nonVecParams)
    if packet_length == 0 and not is_extended:
        print "\tCR_GET_BUFFERED_POINTER_NO_ARGS( pc );"
    elif func_name[:9] == "Translate" or func_name[:5] == "Color":
        # XXX WTF is the purpose of this?
        if is_extended:
            packet_length += 8
        print "\tCR_GET_BUFFERED_POINTER_NO_BEGINEND_FLUSH( pc, %d, GL_TRUE );" % packet_length
    else:
        if is_extended:
            packet_length += 8
        print "\tCR_GET_BUFFERED_POINTER( pc, %d );" % packet_length
    UpdateCurrentPointer( func_name )
    # Extended opcodes: write the length and sub-opcode first, then start
    # the parameter payload at offset 8.
    if is_extended:
        counter = 8
        print WriteData( 0, 'GLint', packet_length, is_swapped )
        print WriteData( 4, 'GLenum', apiutil.ExtendedOpcodeName( func_name ), is_swapped )
    else:
        counter = 0
    # Now emit the WRITE_() macros for all parameters
    for index in range(0,len(params)):
        (name, type, vecSize) = params[index]
        # if we're converting a vector-valued function to a non-vector func:
        if vecSize > 0 and func_name != orig_func_name:
            ptrType = apiutil.PointerType(type)
            for i in range(0, vecSize):
                print WriteData( counter + i * apiutil.sizeof(ptrType),
                                 ptrType, "%s[%d]" % (name, i), is_swapped )
            # XXX increment counter here?
        else:
            print WriteData( counter, type, name, is_swapped )
            if apiutil.IsPointer(type):
                counter += apiutil.PointerSize()
            else:
                counter += apiutil.sizeof(type)
    # finish up
    if is_extended:
        print "\tWRITE_OPCODE( pc, CR_EXTEND_OPCODE );"
    else:
        print "\tWRITE_OPCODE( pc, %s );" % apiutil.OpcodeName( func_name )
    print '\tCR_UNLOCK_PACKER_CONTEXT(pc);'
    print '}\n'
# Script body: emit the generated packer source file on stdout.
apiutil.CopyrightC()
# File prologue for the generated C source.
print """
/* DO NOT EDIT - THIS FILE GENERATED BY THE packer.py SCRIPT */
/* For each of the OpenGL functions we have a packer function which
 * packs the function's opcode and arguments into a buffer.
 */
#include "packer.h"
#include "cr_opcodes.h"
"""
# sys.argv[1] is the directory containing APIspec.txt.
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")
for func_name in keys:
if apiutil.FindSpecial( "packer", func_name ):
continue
if not apiutil.HasPackOpcode(func_name):
continue
pointers_ok = 0
return_type = apiutil.ReturnType(func_name)
params = apiutil.Parameters(func_name)
if return_type != 'void':
# Yet another gross hack for glGetString
if string.find( return_type, '*' ) == -1:
return_type = return_type + " *"
params.append(("return_value", |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope | that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License |
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import mrp_byproduct
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
ta=data, params=params,
auth=(self.username, self.password),
headers={'content-type': 'application/xml',
'accept': 'application/xml'})
return self.parse_response(r, accept_status_codes=[200, 201, 202])
def delete(self, uri, params=dict()):
"""sends a DELETE to the given URI.
Return the response XML as an ElementTree.
"""
r = requests.delete(uri, params=params,
auth=(self.username, self.password),
headers={'content-type': 'application/xml',
'accept': 'application/xml'})
return self.validate_response(r, accept_status_codes=[204])
def check_version(self):
"""Raise ValueError if the version for this interface
does not match any of the versions given for the API.
"""
uri = urljoin(self.baseuri, 'api')
r = requests.get(uri, auth=(self.username, self.password))
root = self.parse_response(r)
tag = nsmap('ver:versions')
assert tag == root.tag
for node in root.findall('version'):
if node.attrib['major'] == self.VERSION: return
raise ValueError('version mismatch')
def validate_response(self, response, accept_status_codes=[200]):
"""Parse the XML returned in the response.
Raise an HTTP error if the response status is not one of the
specified accepted status codes.
"""
if response.status_code not in accept_status_codes:
try:
root = ElementTree.fromstring(response.content)
node = root.find('message')
if node is None:
response.raise_for_status()
message = "%s" % (response.status_code)
else:
message = "%s: %s" % (response.status_code, node.text)
node = root.find('suggested-actions')
if node is not None:
message += ' ' + node.text
except ElementTree.ParseError: # some error messages might not follow the xml standard
message = response.content
raise requests.exceptions.HTTPError(message)
return True
def parse_response(self, response, accept_status_codes=[200]):
"""Parse the XML returned in the response.
Raise an HTTP error if the response status is not 200.
"""
self.validate_response(response, accept_status_codes)
root = ElementTree.fromstring(response.content)
return root
def get_udfs(self, name=None, attach_to_name=None, attach_to_category=None, start_index=None, add_info=False):
"""Get a list of udfs, filtered by keyword arguments.
name: name of udf
attach_to_name: item in the system, to wich the udf is attached, such as
Sample, Project, Container, or the name of a process.
attach_to_category: If 'attach_to_name' is the name of a process, such as 'CaliperGX QC (DNA)',
then you need to set attach_to_category='ProcessType'. Must not be provided otherwise.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
attach_to_name=attach_to_name,
attach_to_category=attach_to_category,
start_index=start_index)
return self._get_instances(Udfconfig, add_info=add_info, params=params)
def get_reagent_types(self, name=None, start_index=None):
"""Get a list of reqgent types, filtered by keyword arguments.
name: reagent type name, or list of names.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
start_index=start_index)
return self._get_instances(ReagentType, params=params)
def get_labs(self, name=None, last_modified=None,
udf=dict(), udtname=None, udt=dict(), start_index=None, add_info=False):
"""Get a list of labs, filtered by keyword arguments.
name: Lab name, or list of names.
last_modified: Since the given ISO format datetime.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
return self._get_instances(Lab, add_info=add_info, params=params)
def get_researchers(self, firstname=None, lastname=None, username=None,
last_modified=None,
udf=dict(), udtname=None, udt=dict(), start_index=None,
add_info=False):
"""Get a list of researchers, filtered by keyword arguments.
firstname: Researcher first name, or list of names.
lastname: Researcher last name, or list of names.
username: Researcher account name, or list of names.
last_modified: Since the given ISO format datetime.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(firstname=firstname,
lastname=lastname,
username=username,
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
return self._get_instances(Researcher, add_info=add_info, params=params)
def get_projects(self, name=None, open_date=None, last_modified=None,
udf=dict(), udtname=None, udt=dict(), start_index=None,
add_info=False):
"""Get a list of projects, filtered by keyword arguments.
name: Project name, or list of names.
open_date: Since | the given ISO format date.
last_modified: Since the given ISO format datetime.
udf: dictionary of UDFs with 'UDFNAME[OPERATOR]' as keys.
udtname: UDT name, or list of names.
udt: dictionary of UDT UDFs with 'UDTNAME.UDFNAME[OPERATOR]' as keys
| and a string or list of strings as value.
start_index: Page to retrieve; all if None.
"""
params = self._get_params(name=name,
open_date=open_date,
last_modified=last_modified,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
return self._get_instances(Project, add_info=add_info, params=params)
def get_sample_number(self, name=None, projectname=None, projectlimsid=None,
udf=dict(), udtname=None, udt=dict(), start_index=None):
"""Gets the number of samples matching the query without fetching every
sample, so it should be faster than len(get_samples()"""
params = self._get_params(name=name,
projectname=projectname,
projectlimsid=projectlimsid,
start_index=start_index)
params.update(self._get_params_udf(udf=udf, udtname=udtname, udt=udt))
root = self.get(self.get_uri(Sample._URI), params=params)
total = 0
while params.get('start-index') is None: # Loop over all pages.
total += len(root.findall("sample"))
node |
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.SimpleHoster import SimpleHoster, create_getInfo
class FileStoreTo(SimpleHoster):
    """pyLoad hoster plugin for filestore.to free downloads."""
    __name__ = "FileStoreTo"
    __type__ = "hoster"
    __version__ = "0.07"
    __status__ = "testing"
    # Matches a filestore.to download URL; the 'ID' group is the file id.
    __pattern__ = r'http://(?:www\.)?filestore\.to/\?d=(?P<ID>\w+)'
    __config__ = [("activated" , "bool", "Activated" , True),
                  ("use_premium", "bool", "Use premium account if available", True)]
    __description__ = """FileStore.to hoster plugin"""
    __license__ = "GPLv3"
    __authors__ = [("Walter Purcaro", "vuolter@gmail.com"),
                   ("stickell", "l.stickell@yahoo.it")]

    # Patterns used by SimpleHoster to scrape name/size and detect
    # offline / temporarily-offline pages (German site text).
    INFO_PATTERN = r'File: <span.*?>(?P<N>.+?)<.*>Size: (?P<S>[\d.,]+) (?P<U>[\w^_]+)'
    OFFLINE_PATTERN = r'>Download-Datei wurde nicht gefunden<'
    TEMP_OFFLINE_PATTERN = r'>Der Download ist nicht bereit !<'
    def setup(self):
        # Free downloads on this host allow resuming and parallel downloads.
        self.resume_download = True
        self.multiDL = True
    def handle_free(self, pyfile):
        # Mandatory wait, then fetch the direct link via the site's AJAX
        # endpoint using the 'D' token scraped from the page HTML.
        self.wait(10)
        self.link = self.load("http://filestore.to/ajax/download.php",
                              get={'D': re.search(r'"D=(\w+)', self.html).group(1)})
getInfo = create_getInfo(FileStoreTo)
|
de
@return: bool
"""
path1 = GetAbsPath(path1)
path2 = GetAbsPath(path2)
if WIN:
path1 = path1.lower()
path2 = path2.lower()
return path1 == path2
def CopyFile(orig, dest):
    """Copy the file *orig* to *dest*.
    @param orig: file to copy (full path)
    @param dest: where to copy to
    @note: not implemented yet; always raises NotImplementedError
    """
    raise NotImplementedError
@uri2path
def GetAbsPath(path):
    """Return the absolute path of *path*.
    @param path: string
    @return: string
    @note: on windows, when win32api is available, short ('~') notation
           paths are expanded to their proper long names.
    """
    result = os.path.abspath(path)
    if WIN and win32api is not None and u"~" in result:
        try:
            result = win32api.GetLongPathNameW(result)
        except Exception:
            pass  # best effort: fall back to the abspath result
    return result
def GetFileExtension(file_str):
    """Return the text after the last '.' as the extension; when there is
    no '.', the whole string is returned.
    @param file_str: path or file name to get extension from
    """
    return file_str.rsplit('.', 1)[-1]
def GetFileModTime(file_name):
    """Return the last-modification time of *file_name*, or 0 when it
    cannot be determined.
    @param file_name: path of file to get mtime of
    """
    try:
        return os.path.getmtime(file_name)
    except (OSError, EnvironmentError):
        return 0
def GetFileName(path):
    """Return the final path component (the file name).
    @param path: full path to get filename from
    """
    return os.path.basename(path)
@uri2path
def GetFileSize(path):
    """Get the size of the file at a given path.
    @param path: Path to file
    @return: size in bytes, or 0 when the file cannot be stat'ed
    """
    try:
        return os.stat(path)[stat.ST_SIZE]
    except (OSError, EnvironmentError):
        # Narrowed from a bare 'except:' — only filesystem errors should
        # mean "no size"; a bare except also swallowed KeyboardInterrupt.
        return 0
def GetPathFromURI(path):
    """Get a local path from a file:// uri.
    @param path: file uri (or plain path, returned unquoted)
    @return: normalized path
    """
    if path.startswith(u"file:"):
        path = path.replace(u"file:", u"")
        path = path.lstrip(u"/")
        if platform.system().lower() in ('windows', 'microsoft'):
            path = path.replace(u"/", u"\\")
            if len(path) >= 2 and path[1] != u':':
                # A valid windows file uri should start with the drive
                # letter. If not make the assumption that it should be
                # the C: drive.
                # NOTE(review): u"C:\\\\" is the literal prefix 'C:\\'
                # (two backslashes) — confirm a single backslash was not
                # intended.
                path = u"C:\\\\" + path
        else:
            path = u"/" + path
    # Decode percent-escapes (e.g. %20 -> space).
    path = urllib2.unquote(path)
    return path
@uri2path
def GetPathName(path):
    """Return the path with its final (file name) component removed.
    @param path: full path to get base of
    """
    return os.path.dirname(path)
@uri2path
def IsLink(path):
    """Return True when *path* is a link: a symlink, or on Windows also
    a '.lnk' shortcut file.
    @return: bool
    """
    if not WIN:
        return os.path.islink(path)
    return path.endswith(".lnk") or os.path.islink(path)
@uri2path
def PathExists(path):
    """Check whether the given path exists on disk.
    @param path: file path or uri
    @return: bool
    """
    result = os.path.exists(path)
    return result
@uri2path
def IsExecutable(path):
    """Return True when *path* is a regular file the current user may
    execute.
    @param path: file path
    @return: bool
    """
    return os.access(path, os.X_OK) and os.path.isfile(path)
@uri2path
def ResolveRealPath(link):
    """Resolve *link* to the real path it points at.
    @param link: path of link file
    @return: string
    """
    assert IsLink(link), "ResolveRealPath expects a link file!"
    if WIN and win32client is not None:
        # Windows shortcuts are resolved through the WScript shell COM API.
        shell = win32client.Dispatch("WScript.Shell")
        return shell.CreateShortCut(link).Targetpath
    return os.path.realpath(link)
def GetFileManagerCmd():
    """Return the command that opens the system file manager: 'open' on
    OS X, 'explorer' on Windows; elsewhere the first of xdg-open,
    nautilus, konqueror, Thunar found on the PATH, defaulting to
    'nautilus' when none is found.
    @return: string
    """
    if wx.Platform == '__WXMAC__':
        return 'open'
    if wx.Platform == '__WXMSW__':
        return 'explorer'
    # Check for common linux filemanagers returning first one found
    # Gnome/ubuntu KDE/kubuntu xubuntu
    for candidate in ('xdg-open', 'nautilus', 'konqueror', 'Thunar'):
        if os.system("which %s > /dev/null" % candidate) == 0:
            return candidate
    return 'nautilus'
def OpenWithFileManager(path):
    """Open the given path with the system's file manager.
    @param path: file/directory path
    """
    subprocess.call([GetFileManagerCmd(), path])
def Which(program):
    """Find the path of the given executable, checking the local
    directory first and then every entry on $PATH.
    @param program: executable name (i.e 'python')
    @return: executable path or None
    """
    if IsExecutable(program):
        return program
    for directory in os.environ["PATH"].split(os.pathsep):
        candidate = os.path.join(directory, program)
        if IsExecutable(candidate):
            return candidate
    return None
def GetDirectoryObject(path, recurse=True, includedot=False):
    """Build a L{Directory} object describing the filesystem under the
    given path.
    @param path: base path to list
    @keyword recurse: descend into subdirectories
    @keyword includedot: include '.' files
    @return: L{Directory} object instance
    """
    assert os.path.isdir(path)
    def _fill(node):
        # Populate *node* with File/Directory children.
        for entry in os.listdir(node.Path):
            if entry.startswith('.') and not includedot:
                continue
            full = os.path.join(node.Path, entry)
            if os.path.isdir(full):
                child = Directory(full)
                if recurse:
                    _fill(child)
            else:
                child = File(full)
            node.Files.append(child)
    root = Directory(path)
    _fill(root)
    return root
#-----------------------------------------------------------------------------#
class File(object):
    """Lightweight in-memory record of a file: its path and mtime."""
    __slots__ = ('path', 'modtime')
    def __init__(self, path):
        super(File, self).__init__()
        self.path = path
        self.modtime = GetFileModTime(self.path)
    @property
    def Path(self):
        return self.path
    @property
    def Name(self):
        return os.path.basename(self.Path)
    def _get_modtime(self):
        return self.modtime
    def _set_modtime(self, mod):
        self.modtime = mod
    ModTime = property(_get_modtime, _set_modtime)
    def __str__(self):
        return self.Path
    def __eq__(self, other):
        assert isinstance(other, File)
        return ComparePaths(self.Path, other.Path)
class Directory(File):
    """A File that also holds the list of entries it contains — a simple
    in-memory representation of a directory tree.
    """
    __slots__ = ('files',)
    def __init__(self, path):
        super(Directory, self).__init__(path)
        self.files = []
    @property
    def Files(self):
        return self.files
#-----------------------------------------------------------------------------#
def GetUniqueName(path, name):
    """Return a destination path based on *name* that does not collide
    with an existing file in *path*, appending '-N' before the extension
    as needed.
    @param path: Root path to folder of files destination
    @param name: desired file name base
    @return: string
    """
    candidate = os.path.join(path, name)
    if not os.path.exists(candidate):
        return candidate
    if '.' in name:
        ext = '.' + name.rsplit('.', 1)[-1]
        base = name[:-len(ext)]
    else:
        ext = ''
        base = name
    # Seed the counter from how many similarly-named files already exist.
    count = len([f for f in os.listdir(path) if f.startswith(base)])
    candidate = os.path.join(path, "%s-%d%s" % (base, count, ext))
    while os.path.exists(candidate):
        count += 1
        candidate = os.path.join(path, "%s-%d%s" % (base, count, ext))
    return candidate
#-----------------------------------------------------------------------------#
def MakeNewFile(path, name):
""" |
Background(detector, dfbkg)
fm_left = Measure(image)
logging.debug("Focus level (left) at %f is %f", left, fm_left)
focus_levels[left] = fm_left
last_pos = left
fm_range = (fm_left, fm_center, fm_right)
pos_range = (left, center, right)
best_fm = max(fm_range)
i_max = fm_range.index(best_fm)
best_pos = pos_range[i_max]
if future._autofocus_state == CANCELLED:
raise CancelledError()
# if best focus was found at the center
if i_max == 1:
step_factor /= 2
if not max_reached:
logging.debug("Now zooming in on improved focus")
max_reached = True
elif (rng[0] > best_pos - step_factor * min_step or
rng[1] < best_pos + step_factor * min_step):
step_factor /= 1.5
logging.debug("Reducing step factor to %g because the focus (%g) is near range limit %s",
step_factor, best_pos, rng)
if step_factor <= 8:
max_reached = True # Force re-checking data
focus.moveAbsSync({"z": best_pos})
step_cntr += 1
if step_cntr == MAX_STEPS_NUMBER:
logging.info("Auto focus gave up after %d steps @ %g m", step_cntr, best_pos)
else:
logging.info("Auto focus found best level %g @ %g m", best_fm, best_pos)
return best_pos, best_fm
except CancelledError:
# Go to the best position known so far
focus.moveAbsSync({"z": best_pos})
finally:
with future._autofocus_lock:
if future._autofocus_state == CANCELLED:
raise CancelledError()
future._autofocus_state = FINISHED
def _CancelAutoFocus(future):
    """
    Canceller of _DoAutoFocus task.
    Returns False when the task has already finished, True when the
    cancellation request was recorded.
    """
    logging.debug("Cancelling autofocus...")
    with future._autofocus_lock:
        if future._autofocus_state == FINISHED:
            return False
        future._autofocus_state = CANCELLED
        logging.debug("Autofocus cancellation requested.")
    return True
# TODO: drop steps, which is unused, or use it
def estimateAutoFocusTime(exposure_time, steps=MAX_STEPS_NUMBER):
    """
    Estimate the autofocus duration: one exposure per step.
    """
    return exposure_time * steps
def AutoFocus(detector, emt, focus, dfbkg=None, good_focus=None):
    """
    Wrapper for DoAutoFocus. It provides the ability to check the progress of autofocus
    procedure or even cancel it.
    detector (model.DigitalCamera or model.Detector): Detector on which to
      improve the focus quality
    emt (None or model.Emitter): In case of a SED this is the scanner used
    focus (model.Actuator): The focus actuator
    dfbkg (model.DataFlow or None): If provided, will be used to start/stop
      the e-beam emission (it must be the dataflow of se- or bs-detector) in
      order to do background subtraction. If None, no background subtraction is
      performed.
    good_focus (float): if provided, an already known good focus position to be
      taken into consideration while autofocusing
    returns (model.ProgressiveFuture): Progress of DoAutoFocus, whose result() will return:
            Focus position (m)
            Focus level
    """
    # Create ProgressiveFuture and update its state to RUNNING
    est_start = time.time() + 0.1
    # Check if the emitter is a scanner (focusing = SEM)
    # Estimate one acquisition's duration to size the future's ETA.
    if model.hasVA(emt, "dwellTime"):
        et = emt.dwellTime.value * numpy.prod(emt.resolution.value)
    elif model.hasVA(detector, "exposureTime"):
        et = detector.exposureTime.value
    else:
        # Completely random... but we are in a case where probably that's the last
        # thing the caller will care about.
        et = 1
    f = model.ProgressiveFuture(start=est_start,
                                end=est_start + estimateAutoFocusTime(et))
    # The state and lock must be installed BEFORE the worker thread starts,
    # since both the worker and the canceller read them.
    f._autofocus_state = RUNNING
    f._autofocus_lock = threading.Lock()
    f.task_canceller = _CancelAutoFocus
    # Run in separate thread
    autofocus_thread = threading.Thread(target=executeTask,
                                        name="Autofocus",
                                        args=(f, _DoAutoFocus, f, detector, emt,
                                              focus, dfbkg, good_focus))
    autofocus_thread.start()
    return f
def AutoFocusSpectrometer(spectrograph, focuser, detectors, selector=None):
    """
    Run autofocus for a spectrograph. It will actually run autofocus on each
    gratings, and for each detectors. The input slit should already be in a
    good position (typically, almost closed), and a light source should be
    active.
    Note: it's currently tailored to the Andor Shamrock SR-193i. It's recommended
    to put the detector on the "direct" output as first detector.
    spectrograph (Actuator): should have grating and wavelength.
    focuser (Actuator): should have a z axis
    detectors (Detector or list of Detectors): all the detectors available on
      the spectrometer. The first detector will be used to autofocus all the
      gratings, and each other detector will be focused with the original
      grating.
    selector (Actuator or None): must have a rx axis with each position corresponding
      to one of the detectors. If there is only one detector, selector can be None.
    return (ProgressiveFuture -> dict((grating, detector)->focus position)): a progressive future
      which will eventually return a map of grating/detector -> focus position.
    """
    # Accept a single detector as well as a list.
    # NOTE(review): collections.Iterable moved to collections.abc in
    # Python 3.3+ and was removed in 3.10 — confirm the supported runtime.
    if not isinstance(detectors, collections.Iterable):
        detectors = [detectors]
    if not detectors:
        raise ValueError("At least one detector must be provided")
    # Create ProgressiveFuture and update its state to RUNNING
    est_start = time.time() + 0.1
    detector = detectors[0]
    if model.hasVA(detector, "exposureTime"):
        et = detector.exposureTime.value
    else:
        # Completely random... but we are in a case where probably that's the last
        # thing the caller will care about.
        et = 1
    # 1 time / grating + 1 time / extra detector
    cnts = len(spectrograph.axes["grating"].choices) + (len(detectors) - 1)
    f = model.ProgressiveFuture(start=est_start,
                                end=est_start + cnts * estimateAutoFocusTime(et))
    f.task_canceller = _CancelAutoFocusSpectrometer
    # Extra info for the canceller
    # (must be installed before the worker thread starts).
    f._autofocus_state = RUNNING
    f._autofocus_lock = threading.Lock()
    f._subfuture = InstantaneousFuture()
    # Run in separate thread
    autofocus_thread = threading.Thread(target=executeTask,
                                        name="Spectrometer Autofocus",
                                        args=(f, _DoAutoFocusSpectrometer, f,
                                              spectrograph, focuser, detectors, selector))
    autofocus_thread.start()
    return f
def _moveSelectorToDetector(selector, detector):
"""
Move the selector to hav | e the given detector receive light
selector (Actuator): a rx axis with a position
detector (Component): the component to receive light
return (position): the new position of the selector
raise LookupError: if no position on the selector affects the detector
"""
# TODO: handle every way of indicating affect position in acq.path? -> move to odemis.util
mv = {}
for an, ad in selector.axes.items():
i | f hasattr(ad, "choices") and isinstance(ad.choices, dict):
for pos, value in ad.choices.items():
if detector.name in value:
# set the position so it points to the target
mv[an] = pos
if mv:
selector.moveAbsSync(mv)
return mv
raise LookupError("Failed to find detector '%s' in positions of selector axes %s" %
(detector.name, selector.axes.keys()))
def _updateAFSProgress(future, last_dur, left):
"""
Update the progress of the future based on duration of the previous autofocus
future (Progres |
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.tools.translate import _
from openerp.osv import fields, osv
class project_compute_phases(osv.osv_memory):
    """Transient wizard that schedules the phases of one or all of the
    current user's projects and then opens the phases list view."""
    _name = 'project.compute.phases'
    _description = 'Project Compute Phases'
    _columns = {
        'target_project': fields.selection([
            ('all', 'Compute All My Projects'),
            ('one', 'Compute a Single Project'),
            ], 'Action', required=True),
        'project_id': fields.many2one('project.project', 'Project')
    }
    _defaults = {
        'target_project': 'one'
    }
    def check_selection(self, cr, uid, ids, context=None):
        # Button handler: simply delegates to compute_date.
        return self.compute_date(cr, uid, ids, context=context)
    def compute_date(self, cr, uid, ids, context=None):
        """
        Compute the phases for scheduling.
        Raises except_osv when 'one' is selected without a project.
        """
        project_pool = self.pool.get('project.project')
        data = self.read(cr, uid, ids, [], context=context)[0]
        if not data['project_id'] and data['target_project'] == 'one':
            raise osv.except_osv(_('Error!'), _('Please specify a project to schedule.'))
        if data['target_project'] == 'one':
            # many2one read() returns (id, name); keep only the id.
            project_ids = [data['project_id'][0]]
        else:
            # All projects owned by the current user.
            project_ids = project_pool.search(cr, uid, [('user_id','=',uid)], context=context)
        project_pool.schedule_phases(cr, uid, project_ids, context=context)
        return self._open_phases_list(cr, uid, data, context=context)
    def _open_phases_list(self, cr, uid, data, context=None):
        """
        Return the scheduled phases list.
        Builds the act_window for project_long_term.act_project_phase,
        filtered on the scheduled project.
        """
        if context is None:
            context = {}
        mod_obj = self.pool.get('ir.model.data')
        act_obj = self.pool.get('ir.actions.act_window')
        result = mod_obj._get_id(cr, uid, 'project_long_term', 'act_project_phase')
        id = mod_obj.read(cr, uid, [result], ['res_id'])[0]['res_id']
        result = act_obj.read(cr, uid, [id], context=context)[0]
        result['target'] = 'current'
        project_id = data.get('project_id') and data.get('project_id')[0] or False
        result['context'] = {"search_default_project_id":project_id, "default_project_id":project_id, "search_default_current": 1}
        return result
project_compute_phases()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
t Decimal, ROUND_DOWN
import hashlib
import json
import logging
import os
import random
import re
from subprocess import CalledProcessError
import time
from . import coverage
from .authproxy import AuthSe | rviceProxy, JSONRP | CException
logger = logging.getLogger("TestFramework.utils")
# Assert functions
##################
def assert_fee_amount(fee, tx_size, fee_per_kB):
    """Assert that *fee* is in the acceptable range for a tx of *tx_size* bytes."""
    target_fee = round(tx_size * fee_per_kB / 1000, 8)
    if fee < target_fee:
        raise AssertionError("Fee of %s BTC too low! (Should be %s BTC)" % (str(fee), str(target_fee)))
    # tolerate the wallet's size estimate being up to 20 bytes high
    upper_bound = (tx_size + 20) * fee_per_kB / 1000
    if fee > upper_bound:
        raise AssertionError("Fee of %s BTC too high! (Should be %s BTC)" % (str(fee), str(target_fee)))
def assert_equal(thing1, thing2, *args):
    """Raise AssertionError unless every argument compares equal to the first."""
    everything = (thing1, thing2) + args
    if thing1 != thing2 or any(thing1 != extra for extra in args):
        raise AssertionError("not(%s)" % " == ".join(str(item) for item in everything))
def assert_greater_than(thing1, thing2):
    """Raise AssertionError unless thing1 > thing2."""
    if thing1 <= thing2:
        message = "%s <= %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_greater_than_or_equal(thing1, thing2):
    """Raise AssertionError unless thing1 >= thing2."""
    if thing1 < thing2:
        message = "%s < %s" % (str(thing1), str(thing2))
        raise AssertionError(message)
def assert_raises(exc, fun, *args, **kwds):
    """Assert that fun(*args, **kwds) raises *exc*; the message is not checked."""
    assert_raises_message(exc, None, fun, *args, **kwds)
def assert_raises_message(exc, message, fun, *args, **kwds):
    """Assert fun(*args, **kwds) raises *exc*; when *message* is given,
    also assert it is a substring of the error text.

    JSONRPCException is rejected up front so RPC failures go through
    assert_raises_rpc_error() instead.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException:
        raise AssertionError("Use assert_raises_rpc_error() to test RPC failures")
    except exc as e:
        # NOTE(review): assumes the caught exception exposes .error['message']
        # (JSON-RPC style); a plain exception would AttributeError here — confirm
        # callers only pass message= with JSON-RPC-like exception types.
        if message is not None and message not in e.error['message']:
            raise AssertionError("Expected substring not found:" + e.error['message'])
    except Exception as e:
        # wrong exception type
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        raise AssertionError("No exception raised")
def assert_raises_process_error(returncode, output, fun, *args, **kwds):
    """Assert that calling *fun* raises CalledProcessError with the given
    return code and an output containing *output* as a substring.

    Args:
        returncode (int): expected process return code.
        output (string): substring expected in the process output.
        fun (function): callable that executes a process.
        args*, kwds**: forwarded to *fun*.
    """
    try:
        fun(*args, **kwds)
    except CalledProcessError as e:
        if e.returncode != returncode:
            raise AssertionError("Unexpected returncode %i" % e.returncode)
        if output not in e.output:
            raise AssertionError("Expected substring not found:" + e.output)
    else:
        raise AssertionError("No exception raised")
def assert_raises_rpc_error(code, message, fun, *args, **kwds):
    """Assert that an RPC call fails with the expected JSON-RPC error.

    Args:
        code (int), optional: expected error code (src/rpc/protocol.h);
            None skips the code check.
        message (string), optional: substring expected in the error string;
            None skips the message check.
        fun (function): the RPC to call.
        args*, kwds**: forwarded to *fun*.
    """
    raised = try_rpc(code, message, fun, *args, **kwds)
    assert raised, "No exception raised"
def try_rpc(code, message, fun, *args, **kwds):
    """Run an RPC command, validating any JSONRPCException it raises.

    Returns True when a JSONRPCException was raised (and its code/message
    matched the expectations), False when no exception occurred. Any other
    exception type becomes an AssertionError.
    """
    try:
        fun(*args, **kwds)
    except JSONRPCException as e:
        error = e.error
        if (code is not None) and (code != error["code"]):
            raise AssertionError("Unexpected JSONRPC error code %i" % error["code"])
        if (message is not None) and (message not in error['message']):
            raise AssertionError("Expected substring not found:" + error['message'])
        return True
    except Exception as e:
        raise AssertionError("Unexpected exception raised: " + type(e).__name__)
    else:
        return False
def assert_is_hex_string(string):
    """Raise AssertionError unless *string* parses as a hexadecimal number."""
    try:
        int(string, 16)
    except Exception as err:
        detail = "Couldn't interpret %r as hexadecimal; raised: %s" % (string, err)
        raise AssertionError(detail)
def assert_is_hash_string(string, length=64):
    """Raise AssertionError unless *string* looks like a lowercase hex hash.

    length=0 (or any falsy value) disables the length check.
    """
    if not isinstance(string, str):
        raise AssertionError("Expected a string, got type %r" % type(string))
    if length and len(string) != length:
        raise AssertionError(
            "String of length %d expected; got %d" % (length, len(string)))
    if not re.match('[abcdef0-9]+$', string):
        raise AssertionError(
            "String %r contains invalid characters for a hash." % string)
def assert_array_result(object_array, to_match, expected, should_not_find=False):
    """Check a list of JSON objects against matching criteria.

    Every object whose keys match *to_match* must also carry the key/value
    pairs in *expected*. With should_not_find=True, asserts instead that no
    object matches *to_match* (and *expected* must be empty).
    """
    if should_not_find:
        assert_equal(expected, {})
    matches = 0
    for entry in object_array:
        # evaluate every to_match key (not short-circuited), like a full scan
        mismatched_keys = [k for k, v in to_match.items() if entry[k] != v]
        if mismatched_keys:
            continue
        if should_not_find:
            matches = matches + 1
        for key, value in expected.items():
            if entry[key] != value:
                raise AssertionError("%s : expected %s=%s" % (str(entry), str(key), str(value)))
            matches = matches + 1
    if matches == 0 and not should_not_find:
        raise AssertionError("No objects matched %s" % (str(to_match)))
    if matches > 0 and should_not_find:
        raise AssertionError("Objects were found %s" % (str(to_match)))
# Utility functions
###################
def check_json_precision():
    """Make sure the json library in use does not lose precision on BTC values."""
    probe = Decimal("20000000.00000003")
    round_tripped = json.loads(json.dumps(float(probe)))
    if int(round_tripped * 1.0e8) != 2000000000000003:
        raise RuntimeError("JSON encode/decode loses precision")
def count_bytes(hex_string):
    """Return the number of bytes encoded by *hex_string*."""
    decoded = bytearray.fromhex(hex_string)
    return len(decoded)
def bytes_to_hex_str(byte_str):
    """Encode raw bytes as a lowercase ASCII hex string."""
    hex_bytes = hexlify(byte_str)
    return hex_bytes.decode('ascii')
def hash256(byte_str):
    """Return the double-SHA256 of *byte_str*, byte-reversed (Bitcoin convention)."""
    first_pass = hashlib.sha256(byte_str).digest()
    second_pass = hashlib.sha256(first_pass).digest()
    return second_pass[::-1]
def hex_str_to_bytes(hex_str):
    """Decode an ASCII hex string into raw bytes."""
    ascii_bytes = hex_str.encode('ascii')
    return unhexlify(ascii_bytes)
def str_to_b64str(string):
    """Return the base64 encoding of *string* (UTF-8) as an ASCII str."""
    utf8_bytes = string.encode('utf-8')
    return b64encode(utf8_bytes).decode('ascii')
def satoshi_round(amount):
    """Round *amount* down to eight decimal places (one satoshi)."""
    one_satoshi = Decimal('0.00000001')
    return Decimal(amount).quantize(one_satoshi, rounding=ROUND_DOWN)
def wait_until(predicate, *, attempts=float('inf'), timeout=float('inf'), lock=None):
if attempts == float('inf') and timeout == float('inf'):
timeout = 60
attempt = 0
timeout += time.time()
while attempt < attempts and time.time() < timeout:
if lock:
with lock:
if predicate():
return
else:
if predicate():
return
|
stalk returns an
InvalidParameterCombination error. If you do not specify either,
AWS Elastic Beanstalk returns MissingRequiredParameter error.
"""
params = {'ApplicationName': application_name}
if template_name:
params['TemplateName'] = template_name
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeConfigurationSettings', params)
def describe_environment_resources(self, environment_id=None,
environment_name=None):
"""Returns AWS resources for this environment.
:type environment_id: string
:param environment_id: The ID of the environment to retrieve AWS
resource usage data. Condition: You must specify either this or an
EnvironmentName, or both. If you do not specify either, AWS Elastic
Beanstalk returns MissingRequiredParameter error.
:type environment_name: string
:param environment_name: The name of the environment to retrieve
AWS resource usage data. Condition: You must specify either this
or an EnvironmentId, or both. If you do not specify either, AWS
Elastic Beanstalk returns MissingRequiredParameter error.
:raises: InsufficientPrivilegesException
"""
params = {}
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
return self._get_response('DescribeEnvironmentResources', params)
def describe_environments(self, application_name=None, version_label=None,
environment_ids=None, environment_names=None,
include_deleted=None,
included_deleted_back_to=None):
"""Returns descriptions for existing environments.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that are associated
with this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to include only those that are associated
with this application version.
:type environment_ids: list
:param environment_ids: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified IDs.
:type environment_names: list
:param environment_names: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those that have the
specified names.
:type include_deleted: boolean
:param include_deleted: Indicates whether to include deleted
environments: true: Environments that have been deleted after
IncludedDeletedBackTo are displayed. false: Do not include deleted
environments.
:type included_deleted_back_to: timestamp
:param included_deleted_back_to: If specified when IncludeDeleted is
set to true, then environments deleted after this date are
displayed.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
p | arams['VersionLabel'] = version_label
if environment_ids:
self.build_list_params(params, environment_ids,
'EnvironmentIds.member')
if environment_names:
s | elf.build_list_params(params, environment_names,
'EnvironmentNames.member')
if include_deleted:
params['IncludeDeleted'] = self._encode_bool(include_deleted)
if included_deleted_back_to:
params['IncludedDeletedBackTo'] = included_deleted_back_to
return self._get_response('DescribeEnvironments', params)
def describe_events(self, application_name=None, version_label=None,
template_name=None, environment_id=None,
environment_name=None, request_id=None, severity=None,
start_time=None, end_time=None, max_records=None,
next_token=None):
"""Returns event descriptions matching criteria up to the last 6 weeks.
:type application_name: string
:param application_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to include only those associated with
this application.
:type version_label: string
:param version_label: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those associated with this application
version.
:type template_name: string
:param template_name: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that are associated with this
environment configuration.
:type environment_id: string
:param environment_id: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type environment_name: string
:param environment_name: If specified, AWS Elastic Beanstalk restricts
the returned descriptions to those associated with this
environment.
:type request_id: string
:param request_id: If specified, AWS Elastic Beanstalk restricts the
described events to include only those associated with this request
ID.
:type severity: string
:param severity: If specified, limits the events returned from this
call to include only those with the specified severity or higher.
:type start_time: timestamp
:param start_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur on or after this time.
:type end_time: timestamp
:param end_time: If specified, AWS Elastic Beanstalk restricts the
returned descriptions to those that occur up to, but not including,
the EndTime.
:type max_records: integer
:param max_records: Specifies the maximum number of events that can be
returned, beginning with the most recent event.
:type next_token: string
:param next_token: Pagination token. If specified, the events return
the next batch of results.
"""
params = {}
if application_name:
params['ApplicationName'] = application_name
if version_label:
params['VersionLabel'] = version_label
if template_name:
params['TemplateName'] = template_name
if environment_id:
params['EnvironmentId'] = environment_id
if environment_name:
params['EnvironmentName'] = environment_name
if request_id:
params['RequestId'] = request_id
if severity:
params['Severity'] = severity
if start_time:
params['StartTime'] = start_time
if end_time:
params['EndTime'] = end_time
if max_records:
params['MaxRecords'] = max_records
if next_token:
params['NextToken'] = next_token
return self._get_response('DescribeEvents', params)
def list_available_solution_stacks(self):
"""Returns a list of the available solution stack names."""
return self._get_response('ListAvailableSolutionStacks', params={})
def rebuild_environment(self, environment_id=None, environment_name=None):
"""
Deletes and recreates all of the AWS resources (for example:
the Auto Scaling group, load balancer, etc.) for a specified
environment and forces a restart.
:type enviro |
, code)
if account is None:
raise ExchangeError(self.id + ' fetchLedger() could not find account id for ' + code)
request = {
'id': account['id'],
# 'start_date': self.iso8601(since),
# 'end_date': self.iso8601(self.milliseconds()),
# 'before': 'cursor', # sets start cursor to before date
# 'after': 'cursor', # sets end cursor to after date
# 'limit': limit, # default 100
# 'profile_id': 'string'
}
if since is not None:
request['start_date'] = self.iso8601(since)
if limit is not None:
request['limit'] = limit # default 100
response = self.privateGetAccountsIdLedger(self.extend(request, params))
for i in range(0, len(response)):
response[i]['currency'] = code
return self.parse_ledger(response, currency, since, limit)
    def fetch_transactions(self, code=None, since=None, limit=None, params={}):
        """Fetch deposit/withdrawal transfers, optionally scoped to one account.

        If params['id'] is given (or an account id can be derived from *code*),
        queries the per-account transfers endpoint; otherwise queries all
        transfers and back-fills each row's currency from its owning account.
        """
        self.load_markets()
        self.load_accounts()
        currency = None
        id = self.safe_string(params, 'id')  # explicit account id overrides *code*
        if id is None:
            if code is not None:
                currency = self.currency(code)
                accountsByCurrencyCode = self.index_by(self.accounts, 'currency')
                account = self.safe_value(accountsByCurrencyCode, code)
                if account is None:
                    raise ExchangeError(self.id + ' fetchTransactions() could not find account id for ' + code)
                id = account['id']
        request = {}
        if id is not None:
            request['id'] = id
        if limit is not None:
            request['limit'] = limit
        response = None
        if id is None:
            response = self.privateGetTransfers(self.extend(request, params))
            for i in range(0, len(response)):
                account_id = self.safe_string(response[i], 'account_id')
                account = self.safe_value(self.accountsById, account_id)
                # NOTE(review): rebinding *code* here clobbers the caller's
                # argument; harmless since each row is assigned its own value,
                # but worth confirming against upstream.
                code = self.safe_string(account, 'currency')
                response[i]['currency'] = code
        else:
            response = self.privateGetAccountsIdTransfers(self.extend(request, params))
            for i in range(0, len(response)):
                # per-account endpoint: every row belongs to *code*'s account
                response[i]['currency'] = code
        return self.parse_transactions(response, currency, since, limit)
def fetch_deposits(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions(code, since, limit, self.extend({'type': 'deposit'}, params))
def fetch_withdrawals(self, code=None, since=None, limit=None, params={}):
return self.fetch_transactions(code, since, limit, | self.extend({'type': 'withdraw'}, params))
def parse_transaction_status(self, transaction):
canceled = self.safe_value(transaction, 'canceled_at')
if canceled:
return 'canceled'
processed = self.safe_value(transaction, 'processed_at')
completed = self.safe_value(transaction, 'completed_at')
if completed:
return 'ok'
elif processed and not completed:
| return 'failed'
else:
return 'pending'
    def parse_transaction(self, transaction, currency=None):
        """Normalize a raw exchange transfer record into ccxt's unified
        transaction structure (id, txid, timestamps, address, fee, ...)."""
        details = self.safe_value(transaction, 'details', {})
        id = self.safe_string(transaction, 'id')
        txid = self.safe_string(details, 'crypto_transaction_hash')
        timestamp = self.parse8601(self.safe_string(transaction, 'created_at'))
        updated = self.parse8601(self.safe_string(transaction, 'processed_at'))
        currencyId = self.safe_string(transaction, 'currency')
        code = self.safe_currency_code(currencyId, currency)
        status = self.parse_transaction_status(transaction)
        amount = self.safe_number(transaction, 'amount')
        type = self.safe_string(transaction, 'type')
        address = self.safe_string(details, 'crypto_address')
        tag = self.safe_string(details, 'destination_tag')
        # a top-level crypto_address, when present, overrides the details one
        address = self.safe_string(transaction, 'crypto_address', address)
        fee = None
        if type == 'withdraw':
            # ccxt's unified naming uses 'withdrawal'
            type = 'withdrawal'
            address = self.safe_string(details, 'sent_to_address', address)
            feeCost = self.safe_number(details, 'fee')
            if feeCost is not None:
                if amount is not None:
                    # report the amount net of the withdrawal fee
                    amount -= feeCost
                fee = {
                    'cost': feeCost,
                    'currency': code,
                }
        return {
            'info': transaction,
            'id': id,
            'txid': txid,
            'timestamp': timestamp,
            'datetime': self.iso8601(timestamp),
            'network': None,
            'address': address,
            'addressTo': None,
            'addressFrom': None,
            'tag': tag,
            'tagTo': None,
            'tagFrom': None,
            'type': type,
            'amount': amount,
            'currency': code,
            'status': status,
            'updated': updated,
            'fee': fee,
        }
    def create_deposit_address(self, code, params={}):
        """Create a new deposit address for *code* via the linked Coinbase
        account; the Coinbase account list is fetched once and cached in
        self.options."""
        self.load_markets()
        currency = self.currency(code)
        accounts = self.safe_value(self.options, 'coinbaseAccounts')
        if accounts is None:
            accounts = self.privateGetCoinbaseAccounts()
            self.options['coinbaseAccounts'] = accounts  # cache it
            self.options['coinbaseAccountsByCurrencyId'] = self.index_by(accounts, 'currency')
        currencyId = currency['id']
        account = self.safe_value(self.options['coinbaseAccountsByCurrencyId'], currencyId)
        if account is None:
            # eslint-disable-next-line quotes
            raise InvalidAddress(self.id + " fetchDepositAddress() could not find currency code " + code + " with id = " + currencyId + " in self.options['coinbaseAccountsByCurrencyId']")
        request = {
            'id': account['id'],
        }
        response = self.privatePostCoinbaseAccountsIdAddresses(self.extend(request, params))
        address = self.safe_string(response, 'address')
        tag = self.safe_string(response, 'destination_tag')
        return {
            'currency': code,
            'address': self.check_address(address),
            'tag': tag,
            'info': response,
        }
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request descriptor (url/method/body/headers) for a REST call.

        Private endpoints are authenticated with HMAC-SHA256 over
        nonce + method + path + body, keyed with the base64-decoded API
        secret, and sent via the CB-ACCESS-* headers.
        """
        request = '/' + self.implode_params(path, params)
        query = self.omit(params, self.extract_params(path))
        if method == 'GET':
            if query:
                request += '?' + self.urlencode(query)
        url = self.implode_hostname(self.urls['api'][api]) + request
        if api == 'private':
            self.check_required_credentials()
            nonce = str(self.nonce())
            payload = ''
            if method != 'GET':
                if query:
                    # non-GET requests carry the query as a JSON body
                    body = self.json(query)
                    payload = body
            # prehash string: nonce + HTTP method + request path + body
            what = nonce + method + request + payload
            secret = None
            try:
                secret = self.base64_to_binary(self.secret)
            except Exception as e:
                raise AuthenticationError(self.id + ' sign() invalid base64 secret')
            signature = self.hmac(self.encode(what), secret, hashlib.sha256, 'base64')
            headers = {
                'CB-ACCESS-KEY': self.apiKey,
                'CB-ACCESS-SIGN': signature,
                'CB-ACCESS-TIMESTAMP': nonce,
                'CB-ACCESS-PASSPHRASE': self.password,
                'Content-Type': 'application/json',
            }
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
def handle_errors(self, code, reason, url, method, headers, body, response, requestHeaders, requestBody):
if (code == 400) or (code == 404):
if body[0] == '{':
message = self.safe_string(response, 'message')
feedback = self.id + ' ' + message
self.throw_exactly_matched_exception(self.exceptions['exact'], mess |
from django.template import Library
from django.utils.functional import Promise
import json
register = Library()
@register.filter
def json_dumps(json_object):
    """Template filter: serialize a value to a compact JSON string.

    Lazy translation objects (Promise) are coerced via dict() first.
    # NOTE(review): dict() on a Promise only works for mapping-like lazies — confirm.
    """
    payload = dict(json_object) if isinstance(json_object, Promise) else json_object
    return json.dumps(payload)
@register.filter
def json_dumps_pretty(json_object):
    """Template filter: serialize a value to indented, human-readable JSON."""
    payload = dict(json_object) if isinstance(json_object, Promise) else json_object
    return json.dumps(payload, indent=4, separators=(',', ': '))
|
# Copyright (c) 2014, The Boovix authors that are listed
# in the AUTHORS file. All right | s reserved. Use of this
# source code is governed by the BSD 3-clause license that
# can be found in the LICENSE file.
"""
Function annotations in Python 3:
http://legacy.python.o | rg/dev/peps/pep-3107/
Type checking in Python 3:
* http://code.activestate.com/recipes/578528
* http://stackoverflow.com/questions/1275646
mypy static type checking during compilation:
https://mail.python.org/pipermail/python-ideas/2014-August/028618.html
from typing import List, Dict
def word_count(input: List[str]) -> Dict[str, int]:
result = {} #type: Dict[str, int]
for line in input:
for word in line.split():
result[word] = result.get(word, 0) + 1
return result
Note that the #type: comment is part of the mypy syntax
"""
|
# -*- coding: utf-8 -*-
REPO_BACKENDS = {}  # repo type key -> backend Repo class
REPO_TYPES = []  # [(type key, human-readable label), ...] choice tuples
class RepositoryTypeNotAvailable(Exception):
    """Raised when no backend is registered for the requested repository type."""
# Prefer the libgit2-based git backend; fall back to the command-line git
# backend when the libgit bindings are not importable.
try:
    from brigitte.backends import libgit
    REPO_BACKENDS['git'] = libgit.Repo
    REPO_TYPES.append(('git', 'GIT'))
except ImportError:
    from brigitte.backends import git
    REPO_BACKENDS['git'] = git.Repo
    REPO_TYPES.append(('git', 'GIT'))
# Mercurial support is optional and silently skipped when unavailable.
try:
    from brigitte.backends import hg
    REPO_BACKENDS['hg'] = hg.Repo
    REPO_TYPES.append(('hg', 'Mercurial'))
except ImportError:
    pass
def get_backend(repo_type):
    """Return the backend Repo class registered for *repo_type*.

    :param repo_type: key such as 'git' or 'hg'
    :raises RepositoryTypeNotAvailable: when no backend could be imported
        for the requested type.
    """
    # idiomatic membership test: 'x not in y' instead of 'not x in y'
    if repo_type not in REPO_BACKENDS:
        raise RepositoryTypeNotAvailable(repo_type)
    return REPO_BACKENDS[repo_type]
|
e repository version needs to be the one that is loaded:
sys.path.insert(0, os.path.abspath(os.path.join('..', '..', '..', 'lib')))
VERSION = '2.10'
AUTHOR = 'Ansible, Inc'
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings.
# They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
# TEST: 'sphinxcontrib.fulltoc'
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'pygments_lexer', 'notfound.extension']
# Later on, add 'sphinx.ext.viewcode' to the list if you want to have
# colorized code generated too for references.
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Ansible'
copyright = "2021 Red Hat, Inc."
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = VERSION
# The full version, including alpha/beta/rc tags.
release = VERSION
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
# unused_docs = []
# List of directories, relative to source directories, that shouldn't be
# searched for source files.
# exclude_dirs = []
# A list of glob-style patterns that should be excluded when looking
# for source files.
exclude_patterns = [
'2.10_index.rst',
'ansible_index.rst',
'core_index.rst',
'porting_guides/core_porting_guides',
]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current | module name will be | prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
highlight_language = 'YAML+Jinja'
# Substitutions, variables, entities, & shortcuts for text which do not need to link to anything.
# For titles which should be a link, use the intersphinx anchors set at the index, chapter, and section levels, such as qi_start_:
# |br| is useful for formatting fields inside of tables
# |_| is a nonbreaking space; similarly useful inside of tables
rst_epilog = """
.. |br| raw:: html
<br>
.. |_| unicode:: 0xA0
:trim:
"""
# Options for HTML output
# -----------------------
html_theme_path = ['../_themes']
html_theme = 'sphinx_rtd_theme'
html_short_title = 'Ansible Documentation'
html_show_sphinx = False
html_theme_options = {
'canonical_url': "https://docs.ansible.com/ansible/latest/",
'vcs_pageview_mode': 'edit'
}
html_context = {
'display_github': 'True',
'github_user': 'ansible',
'github_repo': 'ansible',
'github_version': 'devel/docs/docsite/rst/',
'github_module_version': 'devel/lib/ansible/modules/',
'github_root_dir': 'devel/lib/ansible',
'github_cli_version': 'devel/lib/ansible/cli/',
'current_version': version,
'latest_version': '2.10',
# list specifically out of order to make latest work
'available_versions': ('latest', '2.9', '2.9_ja', '2.8', 'devel'),
'css_files': ('_static/ansible.css', # overrides to the standard theme
),
}
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
# html_style = 'solar.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Ansible Documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (within the static path) to place at the top of
# the sidebar.
# html_logo =
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['../_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_use_modindex = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
html_copy_source = False
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = 'https://docs.ansible.com/ansible/latest'
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Poseidodoc'
# Configuration for sphinx-notfound-pages
# with no 'notfound_template' and no 'notfound_context' set,
# the extension builds 404.rst into a location-agnostic 404 page
#
# default is `en` - using this for the sub-site:
notfound_default_language = "ansible"
# default is `latest`:
# setting explicitly - docsite serves up /ansible/latest/404.html
# so keep this set to `latest` even on the `devel` branch
# then no maintenance is needed when we branch a new stable_x.x
notfound_default_version = "latest"
# makes default setting explicit:
notfound_no_urls_prefix = False
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
# latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
# latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class
# [howto/manual]).
latex_documents = [
('index', 'ansible.tex', 'Ansible 2.2 Documentation', AUTHOR, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# Additional stuff for the LaTeX preamble.
# latex_preamble = ''
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_use_modindex = True
autoclass_content = 'both'
# Note: Our strategy for intersphinx mappings is to have the upstream build location as the
# canonical source and then cached copies of the mapping stored locally in case someone is building
# when disconnected from the internet. We then have a script to update the cached copies.
#
# Because of that, each entry in this mapping should have this format:
# name: ('http://UPSTREAM_URL', (None, 'path/to/local/cache.inv'))
#
# The update script depends on this format so deviating from this (for instance, adding a third
# location for the mapping to live) will confuse it.
intersphinx_mapping = {'python': |
"""
WSGI config for twitter-tools project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on thi | s file, see
https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "twitter-tools.settings")
application = get_wsgi_applic | ation()
|
# Copyright 2008-2013 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from robot.errors import DataError
class XmlElementHandler(object):
    """SAX-style bridge feeding ElementTree start/end events into results.

    Maintains a stack of (result, handler) pairs: each start() pushes the
    pair produced by the current handler for the new element, each end()
    pops it and lets the handler finalize the element.
    """

    def __init__(self, execution_result, root_handler=None):
        self._stack = [(execution_result, root_handler or RootHandler())]

    def start(self, elem):
        current_result, current_handler = self._stack[-1]
        self._stack.append(current_handler.handle_child(elem, current_result))

    def end(self, elem):
        finished_result, finished_handler = self._stack.pop()
        finished_handler.end(elem, finished_result)
class _Handler(object):
def __init__(self):
self._child_map = dict((c.tag, c) for c in self._children())
def _children(self):
return []
def handle_child(self, elem, result):
try:
handler = self._child_map[elem.tag]
except KeyError:
raise DataError("Incompatible XML element '%s'" % elem.tag)
return handler. | start(elem, result), handler
def start(self, elem, result):
return result
def end(self, elem, result):
pass
def _timestamp(self, elem, attr_name):
timesta | mp = elem.get(attr_name)
return timestamp if timestamp != 'N/A' else None
class RootHandler(_Handler):
def _children(self):
return [RobotHandler()]
class RobotHandler(_Handler):
    """Handles the <robot> root element of an output file."""
    tag = 'robot'

    def start(self, elem, result):
        # The 'generator' attribute looks like e.g. 'Robot 2.7 (...)';
        # the first word identifies the generating tool.
        generator_attr = elem.get('generator', 'unknown')
        tool_name = generator_attr.split()[0]
        result.generated_by_robot = tool_name.upper() == 'ROBOT'
        return result

    def _children(self):
        return [RootSuiteHandler(), StatisticsHandler(), ErrorsHandler()]
class SuiteHandler(_Handler):
    """Handles <suite> elements by creating child suites on the result."""
    tag = 'suite'

    def start(self, elem, result):
        suite_name = elem.get('name')
        suite_source = elem.get('source', '')
        return result.suites.create(name=suite_name, source=suite_source)

    def _children(self):
        # 'self' is included so nested <suite> elements reuse this handler.
        return [DocHandler(), MetadataHandler(), SuiteStatusHandler(),
                KeywordHandler(), TestCaseHandler(), self]
class RootSuiteHandler(SuiteHandler):
    """Handles the top-level <suite>: it fills in the existing root suite
    instead of creating a new one."""

    def start(self, elem, result):
        suite = result.suite
        suite.name = elem.get('name')
        suite.source = elem.get('source')
        return suite

    def _children(self):
        # Same children as an ordinary suite, except nested suites get a
        # fresh SuiteHandler rather than this root handler.
        return SuiteHandler._children(self)[:-1] + [SuiteHandler()]
class TestCaseHandler(_Handler):
tag = 'test'
def start(self, elem, result):
return result.tests.create(name=elem.get('name'),
timeout=elem.get('timeout'))
def _children(self):
return [DocHandler(), TagsHandler(), TestStatusHandler(), KeywordHandler()]
class KeywordHandler(_Handler):
    """Handles <kw> elements; keywords can nest, hence 'self' in children."""
    tag = 'kw'

    def start(self, elem, result):
        attrs = {'name': elem.get('name'),
                 'timeout': elem.get('timeout'),
                 'type': elem.get('type')}
        return result.keywords.create(**attrs)

    def _children(self):
        return [DocHandler(), ArgumentsHandler(), KeywordStatusHandler(),
                MessageHandler(), self]
class MessageHandler(_Handler):
    """Handles <msg> elements by adding log messages to the result."""
    tag = 'msg'

    def end(self, elem, result):
        text = elem.text or ''
        is_html = elem.get('html', 'no') == 'yes'
        result.messages.create(text,
                               elem.get('level'),
                               is_html,
                               self._timestamp(elem, 'timestamp'))
class _StatusHandler(_Handler):
    """Shared helpers for parsing <status> elements."""
    tag = 'status'

    def _set_status(self, elem, result):
        # A missing status attribute is treated as a failure.
        result.status = elem.get('status', 'FAIL')

    def _set_message(self, elem, result):
        result.message = elem.text if elem.text else ''

    def _set_times(self, elem, result):
        result.starttime = self._timestamp(elem, 'starttime')
        result.endtime = self._timestamp(elem, 'endtime')
class KeywordStatusHandler(_StatusHandler):
    """Sets status info on keywords; the status message is only kept for
    teardown keywords."""

    def end(self, elem, result):
        self._set_status(elem, result)
        self._set_times(elem, result)
        is_teardown = result.type == result.TEARDOWN_TYPE
        if is_teardown:
            self._set_message(elem, result)
class SuiteStatusHandler(_StatusHandler):
    # Only message and times are taken from the <status> element; the suite
    # status itself is intentionally not set here — presumably it is derived
    # from child results elsewhere (confirm against the result model).
    def end(self, elem, result):
        self._set_message(elem, result)
        self._set_times(elem, result)
class TestStatusHandler(_StatusHandler):
    """Sets status, message and start/end times on a test result."""

    def end(self, elem, result):
        for setter in (self._set_status, self._set_message, self._set_times):
            setter(elem, result)
class DocHandler(_Handler):
    """Handles <doc> elements carrying documentation text."""
    tag = 'doc'

    def end(self, elem, result):
        result.doc = elem.text if elem.text else ''
class MetadataHandler(_Handler):
    # Container element; the key/value pairs live in child <item> elements.
    tag = 'metadata'
    def _children(self):
        return [MetadataItemHandler()]
class MetadataItemHandler(_Handler):
    """Handles a single <item> element inside <metadata>."""
    tag = 'item'

    def end(self, elem, result):
        key = elem.get('name')
        result.metadata[key] = elem.text if elem.text else ''
class TagsHandler(_Handler):
    # Container element; individual tags live in child <tag> elements.
    tag = 'tags'
    def _children(self):
        return [TagHandler()]
class TagHandler(_Handler):
    """Handles a single <tag> element."""
    tag = 'tag'

    def end(self, elem, result):
        tag_text = elem.text if elem.text else ''
        result.tags.add(tag_text)
class ArgumentsHandler(_Handler):
    # Container element; individual arguments live in child <arg> elements.
    tag = 'arguments'
    def _children(self):
        return [ArgumentHandler()]
class ArgumentHandler(_Handler):
    """Handles a single <arg> element by appending it to keyword args."""
    tag = 'arg'

    def end(self, elem, result):
        value = elem.text if elem.text else ''
        result.args += (value,)
class ErrorsHandler(_Handler):
    # Handles the <errors> element: child <msg> elements are added to the
    # result's errors container.
    tag = 'errors'
    def start(self, elem, result):
        return result.errors
    def _children(self):
        return [MessageHandler()]
class StatisticsHandler(_Handler):
    # The content of the <statistics> element is skipped: every child is
    # "handled" by this same no-op handler — presumably the statistics are
    # recomputed from the parsed results instead (confirm against caller).
    tag = 'statistics'
    def handle_child(self, elem, result):
        return result, self
|
#!/usr/bin/env python
# ~*~ encoding: utf-8 ~*~
"""
This file is part of SOCSIM.
SOCSIM is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
SOCSIM is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with SOCSIM. If not, see <http://www.gnu.org/licenses/>.
"""
#===
# name input.py
# date: 2013AUG10
# prog: pr
# desc: input data from ini, export to meta & record
# copy: copyright (C) 2013 Peter Renshaw
#===
import configparser
im | port record
# ---
# read file
# generate data
# build meta
#
# build temp record of meta
# save to file
# ---
# * extact
# - sections
# - key, values
# * organise
# - meta
# - others
class ini:
    """Wraps parsed ini input and collects its sections into a store.

    Parameters
    ----------
    data : object
        The raw input the config was read from; only its truthiness is used.
    config : configparser.ConfigParser (or compatible)
        Parsed configuration to extract sections from.
    """

    def __init__(self, data, config):
        self.data = data
        self.store = []
        self.c = config

    def build(self):
        """Build up the store; return True on success, False otherwise."""
        # All conditions must hold: input data exists, a 'meta' section is
        # present, and the remaining sections are processed cleanly.
        if self.data and self.meta() and self.sections():
            return True
        return False

    def meta(self):
        """Record the 'meta' section; True if present, False otherwise."""
        if 'meta' in self.c.sections():
            # BUG FIX: the original appended self.c.sections()[0], which is
            # only the meta section when 'meta' happens to be listed first.
            self.store.append('meta')
            return True
        return False

    def section(self, name):
        """Parse a single info section by name (currently a debug stub)."""
        print(dir(name))
        return True

    def sections(self):
        """Process every section of the config; always returns True."""
        for name in self.c.sections():
            self.section(name)
        return True

    def all(self):
        """Return everything accumulated in the store."""
        return self.store
def main():
    # Entry-point placeholder: the module currently only provides the `ini`
    # helper class and has no standalone behaviour.
    pass
if __name__ == "__main__":
    main()
# vim: ff=unix:ts=4:sw=4:tw=78:noai:expandtab
|
ons.add_argument("--psdvar_long_segment", type=float,
meta | var="SECONDS", help="Length of long segment "
"when calculating the PSD variability.")
psd_options.add_argument("--psdvar_overlap", type=float, metavar="SECONDS", help="Sample length of the PSD.")
psd_options.add_argument("--psdvar_low_freq", type=float, metavar="HERTZ", help="Minimum frequency to consider in PSD "
"comparison.")
psd_options.add_argument("--psdvar | _high_freq", type=float, metavar="HERTZ", help="Maximum frequency to consider in PSD "
"comparison.")
if include_data_options :
psd_options.add_argument("--psd-estimation",
help="Measure PSD from the data, using "
"given average method.",
choices=["mean", "median", "median-mean"])
psd_options.add_argument("--psd-segment-length", type=float,
help="(Required for --psd-estimation) The "
"segment length for PSD estimation (s)")
psd_options.add_argument("--psd-segment-stride", type=float,
help="(Required for --psd-estimation) "
"The separation between consecutive "
"segments (s)")
psd_options.add_argument("--psd-num-segments", type=int, default=None,
help="(Optional, used only with "
"--psd-estimation). If given, PSDs will "
"be estimated using only this number of "
"segments. If more data is given than "
"needed to make this number of segments "
"then excess data will not be used in "
"the PSD estimate. If not enough data "
"is given, the code will fail.")
if output:
psd_options.add_argument("--psd-output",
help="(Optional) Write PSD to specified file")
return psd_options
def insert_psd_option_group_multi_ifo(parser):
    """
    Adds the options used to call the pycbc.psd.from_cli function to an
    optparser as an OptionGroup. This should be used if you
    want to use these options in your code.

    Multi-detector variant: most options take IFO:VALUE pairs (via
    MultiDetOptionAction) so each interferometer can be configured
    independently.

    Parameters
    -----------
    parser : object
        OptionParser instance.
    """
    psd_options = parser.add_argument_group(
        "Options to select the method of PSD generation",
        "The options --psd-model, --psd-file, --asd-file, "
        "and --psd-estimation are mutually exclusive.")
    # --- PSD source: analytical model, file-based, or estimated from data ---
    psd_options.add_argument("--psd-model", nargs="+",
                        action=MultiDetOptionAction, metavar='IFO:MODEL',
                        help="Get PSD from given analytical model. "
                        "Choose from %s" %(', '.join(get_psd_model_list()),))
    psd_options.add_argument("--psd-file", nargs="+",
                        action=MultiDetOptionAction, metavar='IFO:FILE',
                        help="Get PSD using given PSD ASCII file")
    psd_options.add_argument("--asd-file", nargs="+",
                        action=MultiDetOptionAction, metavar='IFO:FILE',
                        help="Get PSD using given ASD ASCII file")
    psd_options.add_argument("--psd-estimation", nargs="+",
                        action=MultiDetOptionAction, metavar='IFO:FILE',
                        help="Measure PSD from the data, using given "
                        "average method. Choose from "
                        "mean, median or median-mean.")
    # --- Parameters controlling estimation when --psd-estimation is used ---
    psd_options.add_argument("--psd-segment-length", type=float, nargs="+",
                        action=MultiDetOptionAction, metavar='IFO:LENGTH',
                        help="(Required for --psd-estimation) The segment "
                             "length for PSD estimation (s)")
    psd_options.add_argument("--psd-segment-stride", type=float, nargs="+",
                        action=MultiDetOptionAction, metavar='IFO:STRIDE',
                        help="(Required for --psd-estimation) The separation"
                             " between consecutive segments (s)")
    # NOTE(review): help text below reads "segments than excess data" —
    # likely meant "then excess data" (cf. the single-ifo variant).
    psd_options.add_argument("--psd-num-segments", type=int, nargs="+",
                        default=None,
                        action=MultiDetOptionAction, metavar='IFO:NUM',
                        help="(Optional, used only with --psd-estimation). "
                             "If given PSDs will be estimated using only "
                             "this number of segments. If more data is "
                             "given than needed to make this number of "
                             "segments than excess data will not be used in "
                             "the PSD estimate. If not enough data is given "
                             "the code will fail.")
    psd_options.add_argument("--psd-inverse-length", type=float, nargs="+",
                        action=MultiDetOptionAction, metavar='IFO:LENGTH',
                        help="(Optional) The maximum length of the impulse"
                             " response of the overwhitening filter (s)")
    psd_options.add_argument("--psd-output", nargs="+",
                        action=MultiDetOptionAction, metavar='IFO:FILE',
                        help="(Optional) Write PSD to specified file")
    # Options for PSD variation
    psd_options.add_argument("--psdvar_short_segment", type=float,
                        metavar="SECONDS", help="Length of short segment "
                        "when calculating the PSD variability.")
    psd_options.add_argument("--psdvar_long_segment", type=float,
                        metavar="SECONDS", help="Length of long segment "
                        "when calculating the PSD variability.")
    psd_options.add_argument("--psdvar_overlap", type=float, metavar="SECONDS", help="Sample length of the PSD.")
    psd_options.add_argument("--psdvar_low_freq", type=float, metavar="HERTZ", help="Minimum frequency to consider in PSD "
                        "comparison.")
    psd_options.add_argument("--psdvar_high_freq", type=float, metavar="HERTZ", help="Maximum frequency to consider in PSD "
                        "comparison.")
    return psd_options
# Exactly one PSD source must be supplied on the command line; these option
# groups are checked by verify_psd_options()/verify_psd_options_multi_ifo().
ensure_one_opt_groups = []
ensure_one_opt_groups.append(['--psd-file', '--psd-model',
                              '--psd-estimation', '--asd-file'])
def verify_psd_options(opt, parser):
    """Parses the CLI options and verifies that they are consistent and
    reasonable.

    Parameters
    ----------
    opt : object
        Result of parsing the CLI with OptionParser, or any object with the
        required attributes (psd_model, psd_file, asd_file, psd_estimation,
        psd_segment_length, psd_segment_stride, psd_inverse_length, psd_output).
    parser : object
        OptionParser instance.
    """
    # A missing psd_estimation attribute is treated the same as an unset one.
    psd_estimation = getattr(opt, 'psd_estimation', None) is not None
    for opt_group in ensure_one_opt_groups:
        ensure_one_opt(opt, parser, opt_group)
    if psd_estimation:
        required_opts(opt, parser,
                      ['--psd-segment-stride', '--psd-segment-length'],
                      required_by="--psd-estimation")
def verify_psd_options_multi_ifo(opt, parser, ifos):
"""Parses the CLI options and verifies that they are consistent and
reasonable.
Parameters
----------
opt : object
Result of parsing the CLI with OptionParser, or any object with the
required attributes (psd_model, psd_file, asd_file, psd_estimation,
psd_segment_length, psd_segment_stride, psd_ |
# Prefer django-user-accounts' decorator/mixin when the 'account' app is
# available; otherwise fall back to Django's built-in equivalents so the
# rest of the code can import these names unconditionally.
try:
    from account.decorators import login_required
except ImportError:
    from django.contrib.auth.decorators import login_required  # noqa
try:
    from account.mixins import LoginRequiredMixin
except ImportError:
    from django.contrib.auth.mixins import LoginRequiredMixin  # noqa
| |
a compiler expects
to see.
'''
def __init__(self, member_node):
    # IDL node for a single dictionary member or function declaration.
    self.node = member_node
def process(self, callbacks):
    """Convert this member node into a (name, schema-dict) pair.

    callbacks maps callback names to their already-processed schemas and is
    forwarded to Callspec/Typeref processing.
    """
    properties = OrderedDict()
    name = self.node.GetName()
    # Pass-through flags and options attached to the IDL node.
    if self.node.GetProperty('deprecated'):
        properties['deprecated'] = self.node.GetProperty('deprecated')
    for property_name in ('OPTIONAL', 'nodoc', 'nocompile', 'nodart'):
        if self.node.GetProperty(property_name):
            properties[property_name.lower()] = True
    for option_name, sanitizer in [
        ('maxListeners', int),
        ('supportsFilters', lambda s: s == 'true'),
        ('supportsListeners', lambda s: s == 'true'),
        ('supportsRules', lambda s: s == 'true')]:
        if self.node.GetProperty(option_name):
            if 'options' not in properties:
                properties['options'] = {}
            properties['options'][option_name] = sanitizer(self.node.GetProperty(
                option_name))
    is_function = False
    parameter_comments = OrderedDict()
    # A Comment child supplies the description (and per-parameter comments);
    # a Callspec child means this member is a function.
    for node in self.node.GetChildren():
        if node.cls == 'Comment':
            (parent_comment, parameter_comments) = ProcessComment(node.GetName())
            properties['description'] = parent_comment
        elif node.cls == 'Callspec':
            is_function = True
            name, parameters, return_type = (Callspec(node, parameter_comments)
                                             .process(callbacks))
            properties['parameters'] = parameters
            if return_type is not None:
                properties['returns'] = return_type
    properties['name'] = name
    if is_function:
        properties['type'] = 'function'
    else:
        # Non-functions get their type resolved from the TYPEREF property.
        properties = Typeref(self.node.GetProperty('TYPEREF'),
                             self.node, properties).process(callbacks)
    enum_values = self.node.GetProperty('legalValues')
    if enum_values:
        # NOTE(review): under Python 3 map() returns a lazy iterator, not a
        # list — this code appears to assume Python 2 semantics; confirm the
        # tool still runs under Python 2 or wrap these in list().
        if properties['type'] == 'integer':
            enum_values = map(int, enum_values)
        elif properties['type'] == 'double':
            enum_values = map(float, enum_values)
        properties['enum'] = enum_values
    return name, properties
class Typeref(object):
    '''
    Given a TYPEREF property representing the type of dictionary member or
    function parameter, converts into a Python dictionary that the JSON schema
    compiler expects to see.
    '''
    def __init__(self, typeref, parent, additional_properties):
        # typeref: the IDL TYPEREF string (e.g. 'DOMString'), or None.
        # parent: the IDL node the type annotation is attached to.
        # additional_properties: dict populated (and returned) with schema info.
        self.typeref = typeref
        self.parent = parent
        self.additional_properties = additional_properties

    def process(self, callbacks):
        """Return a JSON-schema-style dict describing this type.

        callbacks maps callback names to already-processed schemas; a typeref
        naming a callback is inlined instead of emitted as a $ref.
        """
        properties = self.additional_properties
        result = properties
        if self.parent.GetPropertyLocal('OPTIONAL'):
            properties['optional'] = True
        # The IDL parser denotes array types by adding a child 'Array' node onto
        # the Param node in the Callspec.
        for sibling in self.parent.GetChildren():
            if sibling.cls == 'Array' and sibling.GetName() == self.parent.GetName():
                properties['type'] = 'array'
                properties['items'] = OrderedDict()
                # From here on, the element type is filled in on 'items'.
                properties = properties['items']
                break
        if self.typeref == 'DOMString':
            properties['type'] = 'string'
        elif self.typeref == 'boolean':
            properties['type'] = 'boolean'
        elif self.typeref == 'double':
            properties['type'] = 'number'
        elif self.typeref == 'long':
            properties['type'] = 'integer'
        elif self.typeref == 'any':
            properties['type'] = 'any'
        elif self.typeref == 'object':
            properties['type'] = 'object'
            if 'additionalProperties' not in properties:
                properties['additionalProperties'] = OrderedDict()
            properties['additionalProperties']['type'] = 'any'
            instance_of = self.parent.GetProperty('instanceOf')
            if instance_of:
                properties['isInstanceOf'] = instance_of
        elif self.typeref == 'ArrayBuffer':
            properties['type'] = 'binary'
            properties['isInstanceOf'] = 'ArrayBuffer'
        elif self.typeref == 'FileEntry':
            properties['type'] = 'object'
            properties['isInstanceOf'] = 'FileEntry'
            if 'additionalProperties' not in properties:
                properties['additionalProperties'] = OrderedDict()
            properties['additionalProperties']['type'] = 'any'
        elif self.parent.GetPropertyLocal('Union'):
            # Union types become a 'choices' list of the Option child types.
            # (Removed an unused local `choices = []` that was dead code.)
            properties['choices'] = [Typeref(node.GetProperty('TYPEREF'),
                                             node,
                                             OrderedDict()).process(callbacks)
                                     for node in self.parent.GetChildren()
                                     if node.cls == 'Option']
        elif self.typeref is None:
            properties['type'] = 'function'
        else:
            if self.typeref in callbacks:
                # Do not override name and description if they are already specified.
                name = properties.get('name', None)
                description = properties.get('description', None)
                properties.update(callbacks[self.typeref])
                if description is not None:
                    properties['description'] = description
                if name is not None:
                    properties['name'] = name
            else:
                properties['$ref'] = self.typeref
        return result
class Enum(object):
    '''
    Given an IDL Enum node, converts into a Python dictionary that the JSON
    schema compiler expects to see.
    '''
    def __init__(self, enum_node):
        self.node = enum_node
        self.description = ''

    def process(self, callbacks):
        """Return the JSON-schema dict for this enum.

        callbacks is accepted for interface parity with the other node
        processors; it is not used here.
        """
        enum = []
        for node in self.node.GetChildren():
            if node.cls == 'EnumItem':
                enum_value = {'name': node.GetName()}
                # An EnumItem's only legal child is its doc comment.
                for child in node.GetChildren():
                    if child.cls == 'Comment':
                        enum_value['description'] = ProcessComment(child.GetName())[0]
                    else:
                        raise ValueError('Did not process %s %s' % (child.cls, child))
                enum.append(enum_value)
            elif node.cls == 'Comment':
                self.description = ProcessComment(node.GetName())[0]
            else:
                sys.exit('Did not process %s %s' % (node.cls, node))
        result = {'id': self.node.GetName(),
                  'description': self.description,
                  'type': 'string',
                  'enum': enum}
        for property_name in (
            'inline_doc', 'noinline_doc', 'nodoc', 'cpp_omit_enum_type',):
            if self.node.GetProperty(property_name):
                result[property_name] = True
        if self.node.GetProperty('deprecated'):
            # BUG FIX: was `result[deprecated]` — a NameError at runtime —
            # instead of the string key 'deprecated'.
            result['deprecated'] = self.node.GetProperty('deprecated')
        return result
class Namespace(object):
'''
Given an IDLNode representing an IDL namespace, converts into a Python
dictionary that the JSON schema compiler expects to see.
'''
def __init__(self,
             namespace_node,
             description,
             nodoc=False,
             internal=False,
             platforms=None,
             compiler_options=None,
             deprecated=None):
    # namespace_node: IDLNode for the namespace being converted.
    # description: documentation string extracted from the IDL.
    # nodoc / internal: flags copied straight into the output schema.
    # platforms: optional platform restriction list.
    # compiler_options: optional dict merged into the output schema.
    # deprecated: optional deprecation message.
    self.namespace = namespace_node
    self.nodoc = nodoc
    self.internal = internal
    self.platforms = platforms
    self.compiler_options = compiler_options
    # Filled in by process() as child nodes are walked.
    self.events = []
    self.functions = []
    self.types = []
    self.callbacks = OrderedDict()
    self.description = description
    self.deprecated = deprecated
def process(self):
for node in self.namespace.GetChildren():
if node.cls == 'Dictionary':
self.types.append(Dictionary(node).process(self.callbacks))
elif node.cls == 'Callback':
k, v = Member(node).process(self.callbacks)
self.callbacks[k] = v
elif node.cls == 'Interface' and node.GetName() == 'Functions':
self.functions = self.process_interface(node)
elif node.cls == 'Interface' and node.GetName() == 'Events':
self.events = self.process_interface(node)
elif node.cls == 'Enum':
self.types.append(Enum(node).process(self.callbacks))
else:
sys.exit('Did not process %s %s' % (node.cls, node))
if self.compiler_options is not None:
compiler_options = self.compiler_options
else:
compiler_options = {}
return {'namespace': self.namespace.GetName(),
'description': self.description,
'nodoc': self.nodoc,
'types': self.types,
'functions': self.functions,
'internal': sel |
# -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
import unittest
from pykit.parsing import cirparser
from pykit.ir import verify, interp
source = """
#include <pykit_ir.h>
Int32 myglobal = 10;
float simple(float x) {
return x * x;
}
Int32 loop() {
Int32 i, sum = 0;
for (i = 0; i < 10; i = i + 1) {
sum = sum + i;
}
retur | n sum;
}
Int32 raise() {
Exception exc = new_exc("TypeError", "");
exc_thro | w(exc);
return 0;
}
"""
mod = cirparser.from_c(source)
verify(mod)
class TestInterp(unittest.TestCase):
    """Interpreter round-trip tests over the functions parsed from `source`."""

    def test_simple(self):
        func = mod.get_function('simple')
        self.assertEqual(interp.run(func, args=[10.0]), 100.0)

    def test_loop(self):
        func = mod.get_function('loop')
        self.assertEqual(interp.run(func), 45)

    def test_exceptions(self):
        func = mod.get_function('raise')
        # Running 'raise' must surface the thrown TypeError as an
        # UncaughtException wrapping it.
        with self.assertRaises(interp.UncaughtException) as ctx:
            interp.run(func)
        exc, = ctx.exception.args
        self.assertIsInstance(exc, TypeError)
if __name__ == '__main__':
unittest.main() |
"""
Trabalho T2 da disciplina Teoria dos Grafos, ministrada em 2014/02
'All Hail Gabe Newell'
Alunos:
Daniel Nobusada 344443
Thales Eduardo Adair Menato 407976
Jorge Augusto Bernardo 407844
"""
import networkx as nx
import numpy as np
import plotly.plotly as py
from plotly.graph_objs import *
py.sign_in("thamenato", "aq0t3czzut")
# Importa grafo Zachary's Karate Club
graphG = nx.read_gml('karate.gml')
"""
1) Computacao da distribuicao estacionaria teorica (steady state) do grafo
w(i) = d(vi) / 2|E|
"""
w_real = []
for i in graphG.nodes_iter():
aux = float(graphG.degree(i)) / float((2 * graphG.number_of_edges()))
w_real.append(aux)
"""
2) Calcular The Power Method
http://college.cengage.com/mathematics/larson/elementary_linear/4e/shared/
downloads/c10s3.pdf
"""
# Matriz P recebe a matriz de adjacencia de matrixG
matrixP = nx.adjacency_matrix(graphG)
# A soma de cada linha eh calculado
sum_linha = []
for i in matrixP:
sum_linha.append(i.sum())
# Para cada p(i,j) de P temos p(i,j) = p(i,j)/sum_linha(i)
for i in range(0, matrixP.shape[0]):
for j in range(0, matrixP.shape[1]):
matrixP[i, j] = float(matrixP[i, j]) / float(sum_linha[i])
# Vetor w_inicial onde a soma eh 1 com divisao de probabilidade 1/G.order()
# Para o grafo utilizado G.order() = 34
w_inicial = np.array([1.0/float(graphG.order())
for i in range(0, graphG.order())])
# Calcular w_power5
w_power5 = np.dot(w_inicial, matrixP)
for i in range(0, 4):
w_power5 = np.dot(w_power5, matrixP)
# Calcular w_power100
w_power100 = np.dot(w_inicial, matrixP)
for i in range(0, 99):
w_power100 = np.dot(w_power100, matrixP)
# A soma de todos os elementos destes vetores eh 1
"""
3) Escolha de 2 vertices distintos e realizar a caminhada aleatoria de ambos
"""
# Funcao Random Walk
def random_walk(node, numPassos):
    """Random walk of `numPassos` steps starting at `node`.

    Returns a list indexed by (node id - 1) with the fraction of steps that
    visited each node of the module-level graph `graphG`.
    """
    visit_freq = [0.0 for _ in range(0, graphG.number_of_nodes())]
    # At every step: pick a uniformly random neighbour of the current node,
    # move there, and count the visit.
    for _ in range(0, numPassos):
        neighbours = graphG.neighbors(node)
        node = neighbours[np.random.randint(0, len(neighbours))]
        visit_freq[node - 1] += 1
    # Normalise the visit counts by the number of steps taken.
    for idx in range(0, len(visit_freq)):
        visit_freq[idx] /= numPassos
    return visit_freq
# Pick two distinct random vertices (nodes).
nodeA = np.random.random_integers(1, graphG.number_of_nodes())
nodeB = np.random.random_integers(1, graphG.number_of_nodes())
# BUG FIX: the original compared with 'is' (object identity), which is
# effectively always False for two freshly created numpy integers, so equal
# nodes were never redrawn. Compare values with '==' until they differ.
while nodeB == nodeA:
    nodeB = np.random.random_integers(1, graphG.number_of_nodes())
# Two random walks of length N = 100.
w_random100a = random_walk(nodeA, 100)
w_random100b = random_walk(nodeB, 100)
# Two random walks of length N = 10000.
w_random10000a = random_walk(nodeA, 10000)
w_random10000b = random_walk(nodeB, 10000)
# Print no console de todos os | dados obtidos
print "w_power5: "
w_power5_lista = []
for i in range(0, w_power5.size):
w_power5_lista.append('%.4f'%w_power5[0, i])
print w_power5_lista
print "w_power100: "
w_power100_lista = []
for i in range(0, w_power100.size):
w_power100_lista.append('%.4f'%w_power100[0, i])
print w_power100_lista
print "w_random100a:"
print w_random100a
print "w_random100b:"
print w_random100b
print "w_random10000a:"
print w_random10000a
print "w_random10000b:"
print w_random10000b
# Para plotar no link: https://plot.ly/~thamenato/2/t2-random-walk/
# basta descomentar e executar o codigo novamente
# Tem de instalar a biblioteca (https://plot.ly/python/getting-started/)
# no Windows eh soh abrir o menu do Python(x,y) e escolher interactive consoles: IPython(sh)
# e executar: pip install plotly
"""
trace_power5 = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_power5)),
name = 'w_power5',
marker = Marker(
color='rgb(51,102,255)'
)
)
trace_power100 = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_power100)),
name = 'w_power100',
marker = Marker(
color='rgb(0,184,245)'
)
)
trace_random100a = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random100a)),
name = 'w_random100a',
marker = Marker(
color='rgb(138,184,0)'
)
)
trace_random100b = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random100b)),
name = 'w_random100b',
marker = Marker(
color='rgb(184,245,0)'
)
)
trace_random10000a = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random10000a)),
name = 'w_random10000a',
marker = Marker(
color='rgb(245,184,0)'
)
)
trace_random10000b = Bar(
x = graphG.nodes(),
y = np.squeeze(np.asarray(w_random10000b)),
name = 'w_random10000b',
marker = Marker(
color='rgb(255,102,51)'
)
)
data = Data([trace_power5, trace_power100, trace_random100a,
trace_random100b, trace_random10000a, trace_random10000b])
layout = Layout(
title = 'T2: Random Walk',
xaxis = XAxis(
title = 'Nodes',
titlefont = Font(
size = 16,
color = 'rgb(107, 107, 107)'
),
tickfont = Font(
size = 14,
color = 'rgb(107, 107, 107)'
)
),
yaxis = YAxis(
title = 'Probability',
titlefont = Font(
size = 16,
color = 'rgb(107, 107, 107)'
),
tickfont = Font(
size = 14,
color = 'rgb(107, 107, 107)'
)
),
legend = Legend(
x = 0.25,
y = 1.0,
bgcolor = 'rgba(255, 255, 255, 0)',
bordercolor = 'rgba(255, 255, 255, 0)'
),
barmode = 'group',
bargap = 0.15,
bargroupgap = 0.1
)
fig = Figure(data = data, layout = layout)
plot_url = py.plot(fig, filename='T2_Random_Walks')
"""
|
# Copyright 2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License | .
import proboscis
from trove.tests.api import backups
from trove.tests.api import configurations
from trove.tests.api import databases
from trove.tests.api import datastores
from trove.tests.api import flavors
from trove.tests.api import instances
from trove.tests.api import instances_actions
from trove.tests.api.mgmt import accounts
from trove.tests.api.mgmt import admin_required
from trove.tests.api.mgmt impo | rt hosts
from trove.tests.api.mgmt import instances as mgmt_instances
from trove.tests.api.mgmt import storage
from trove.tests.api import replication
from trove.tests.api import root
from trove.tests.api import user_access
from trove.tests.api import users
from trove.tests.api import versions
# Group that brings up the services other test groups depend on.
GROUP_SERVICES_INITIALIZE = "services.initialize"
# Full blackbox run: every public-API test group for a MySQL datastore.
black_box_groups = [
    flavors.GROUP,
    users.GROUP,
    user_access.GROUP,
    databases.GROUP,
    root.GROUP,
    GROUP_SERVICES_INITIALIZE,
    instances.GROUP_START,
    instances.GROUP_QUOTAS,
    instances.GROUP_SECURITY_GROUPS,
    backups.GROUP,
    replication.GROUP,
    configurations.GROUP,
    datastores.GROUP,
    instances_actions.GROUP_RESIZE,
    # TODO(SlickNik): The restart tests fail intermittently so pulling
    # them out of the blackbox group temporarily. Refer to Trove bug:
    # https://bugs.launchpad.net/trove/+bug/1204233
    # instances_actions.GROUP_RESTART,
    instances_actions.GROUP_STOP_MYSQL,
    instances.GROUP_STOP,
    versions.GROUP,
    instances.GROUP_GUEST,
]
proboscis.register(groups=["blackbox", "mysql"],
                   depends_on_groups=black_box_groups)
# Reduced smoke-test selection of groups.
simple_black_box_groups = [
    GROUP_SERVICES_INITIALIZE,
    flavors.GROUP,
    versions.GROUP,
    instances.GROUP_START_SIMPLE,
    admin_required.GROUP,
]
proboscis.register(groups=["simple_blackbox"],
                   depends_on_groups=simple_black_box_groups)
# Management (admin) API test groups.
black_box_mgmt_groups = [
    accounts.GROUP,
    hosts.GROUP,
    storage.GROUP,
    instances_actions.GROUP_REBOOT,
    admin_required.GROUP,
    mgmt_instances.GROUP,
]
proboscis.register(groups=["blackbox_mgmt"],
                   depends_on_groups=black_box_mgmt_groups)
# Datastores groups for int-tests
datastore_group = [
    GROUP_SERVICES_INITIALIZE,
    flavors.GROUP,
    versions.GROUP,
    instances.GROUP_START_SIMPLE,
]
proboscis.register(groups=["cassandra", "couchbase", "mongodb", "postgresql"],
                   depends_on_groups=datastore_group)
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from __future__ import print_function
import sys
import os
from gpu_tests import gpu_integration_test
test_harness_script = r"""
function VerifyHardwareAccelerated(feature) {
feature += ': '
var list = document.querySelector('.feature-status-list');
for (var i=0; i < list.childElementCount; i++) {
var span_list = list.children[i].getElementsByTagName('span');
var feature_str = span_list[0].textContent;
var value_str = span_list[1].textContent;
if ((feature_str == feature) &&
(value_str == 'Hardware accelerated')) {
return true;
}
}
return false;
};
"""
def safe_feature_name(feature):
  """Lower-case `feature` and replace spaces with underscores so it can be
  embedded in a generated test name."""
  lowered = feature.lower()
  return lowered.replace(' ', '_')
class HardwareAcceleratedFeatureIntegrationTest(
    gpu_integration_test.GpuIntegrationTest):
  """Tests GPU acceleration is reported as active for various features."""
  @classmethod
  def Name(cls):
    """The name by which this test is invoked on the command line."""
    return 'hardware_accelerated_feature'
  @classmethod
  def SetUpProcess(cls):
    # One browser instance is shared by all tests generated below.
    # NOTE(review): super(cls, Class) is the reverse of the conventional
    # super(Class, cls) argument order — it works when cls is this class,
    # but confirm it behaves as intended for subclasses.
    super(cls, HardwareAcceleratedFeatureIntegrationTest).SetUpProcess()
    cls.CustomizeBrowserArgs([])
    cls.StartBrowser()
    cls.SetStaticServerDirs([])
  def _Navigate(self, url):
    # It's crucial to use the action_runner, rather than the tab's
    # Navigate method directly. It waits for the document ready state
    # to become interactive or better, avoiding critical race
    # conditions.
    self.tab.action_runner.Navigate(
        url, script_to_evaluate_on_commit=test_harness_script)
  @classmethod
  def GenerateGpuTests(cls, options):
    # Yields (test_name, url, args) tuples, one per accelerated feature.
    # NOTE(review): '(feature)' is a parenthesized string, not a 1-tuple —
    # confirm whether '(feature,)' was intended as the args element.
    tests = ('WebGL', 'Canvas')
    for feature in tests:
      yield ('HardwareAcceleratedFeature_%s_accelerated' %
             safe_feature_name(feature), 'chrome://gpu', (feature))
  def RunActualGpuTest(self, test_path, *args):
    # Loads chrome://gpu and uses the injected test_harness_script to check
    # the feature-status list reports the feature as hardware accelerated.
    feature = args[0]
    self._Navigate(test_path)
    tab = self.tab
    tab.WaitForJavaScriptCondition('window.gpuPagePopulated', timeout=30)
    if not tab.EvaluateJavaScript(
        'VerifyHardwareAccelerated({{ feature }})', feature=feature):
      print('Test failed. Printing page contents:')
      print(tab.EvaluateJavaScript('document.body.innerHTML'))
      self.fail('%s not hardware accelerated' % feature)
  @classmethod
  def ExpectationsFiles(cls):
    # Expectations file lives next to this module.
    return [
        os.path.join(
            os.path.dirname(os.path.abspath(__file__)), 'test_expectations',
            'hardware_accelerated_feature_expectations.txt')
    ]
def load_tests(loader, tests, pattern):
  # Standard unittest load_tests protocol hook; delegates discovery to the
  # GPU integration test loader for this module.
  del loader, tests, pattern  # Unused.
  return gpu_integration_test.LoadAllTestsInModule(sys.modules[__name__])
|
from django.db import models
class AssetField(models.ForeignKey):
    """ForeignKey pointing at assets.Asset; optional by default and nulled
    out when the referenced asset is deleted."""
    description = 'A file asset'

    def __init__(self, **kwargs):
        # Assets are optional unless the caller overrides these.
        for option, value in (('blank', True),
                              ('null', True),
                              ('default', None),
                              # Suppress the backwards relation: a model with
                              # two AssetFields pointing at Asset would
                              # otherwise generate clashing reverse accessors
                              # (e.g. two `student_set`-style attributes), so
                              # we use Django's related_name='+' convention
                              # to strip it off.
                              ('related_name', '+')):
            kwargs.setdefault(option, value)
        # These are fixed for every AssetField and cannot be overridden.
        kwargs['to'] = 'assets.Asset'
        kwargs['on_delete'] = models.SET_NULL
        super(AssetField, self).__init__(**kwargs)
|
t(self.zFar)
elif form_data['side'] =='on':
self.side = 'right'
self.shoulder = 'RShoulderPitch'
self.pointing_dict = self.pointing_right
self.zNear = -(math.atan(self.y/self.near)+math.pi/2)
self.zFar = -(math.atan(self.y/self.far)+math.pi/2)
print(self.side)
return
#calculates pointing angles vector = {'un':[angleZ,angleShoulder],'uf':[angleZ,angleShoulder],'ln':[angleZ,angleShoulder], 'lf':[angleZ,angleShoulder]}
def calculate_angles():
    # Pointing angles for the four screen corners (upper/lower x near/far),
    # derived from the distances and heights gathered from the form earlier.
    # NOTE(review): nested helper relying on `self` from the enclosing
    # method's closure; it mutates self in place and returns nothing.
    self.upperNear = -math.atan(self.upper/(math.sqrt(math.pow(self.near,2)+math.pow(self.y,2))))
    self.upperFar = -math.atan(self.upper/(math.sqrt(math.pow(self.far,2)+math.pow(self.y,2))))
    self.lowerNear = -math.atan(self.lower/(math.sqrt(math.pow(self.near,2)+math.pow(self.y,2))))
    self.lowerFar = -math.atan(self.lower/(math.sqrt(math.pow(self.far,2)+math.pow(self.y,2))))
    # Mapping: {'un'/'uf'/'ln'/'lf': [angleZ, angleShoulder]}
    self.angles = {'un':[self.zNear,self.upperNear],'uf':[self.zFar,self.upperFar],'ln':[self.zNear,self.lowerNear], 'lf':[self.zFar,self.lowerFar]}
    print(self.angles)
    return
#read the json data from the form (ip, file, start)
form_data = json.loads(json_data)
self.my_path = form_data['file']
self.my_ip = form_data['ip']
self.start_slide = form_data['start']
self.point_check = form_data['enable_point']
if self.point_check == '':
self.point_enabled = False
#if gesture is enable then get the coordinates variables
elif self.point_check == 'on':
self.point_enabled = True
retrieve_pointing_data()
calculate_angles()
#initialize the presentation and variables
self.start_slide = int(self.start_slide)
self.current_slide = self.start_slide
self.slides_path = os.path.dirname(self.my_path) + '/slides/slide'
self.my_pres = Presentation(self.my_path)
self.number_of_slides = len(self.my_pres.slides)
notes = []
#connect to the robot and show initial slide
#COMENT THIS WHEN TESTING OUTISDE ROBOT
########################################################################
self.tts = ALProxy("ALAnimatedSpeech", str(self.my_ip), 9559)
self.motion = ALProxy("ALMotion", str(self.my_ip), 9559)
self.posture = ALProxy("ALRobotPosture", str(self.my_ip), 9559)
self.aw = ALProxy("ALBasicAwareness", str(self.my_ip), 9559)
self.motion.moveInit()
self.motion.setStiffnesses(self.shoulder, 1)
self.posture.goToPosture("StandInit", 0.5)
self.aw.setTrackingMode("Head")
self.aw.setEngagementMode("Unengaged")
# self.aw.resumeAwareness()
self.a | w.pauseAwareness()
########################################################################
slide_src = self.slides_path + str(self.start_slide) + '.jpg'
self.app.evalua | te_javascript("document.getElementById('presentation_image').style.display='block'")
self.app.evaluate_javascript("document.getElementById('presentation_content').innerHTML = 'Log:<br>Starting presentation at: %s<br>IP: %s<br>Notes: '" %(self.my_path, self.my_ip))
self.app.evaluate_javascript("document.getElementById('slide').src = '%s'" %(slide_src))
self.app.evaluate_javascript("document.getElementById('presentation_image').style.display = 'block'")
self.app.evaluate_javascript("scroll(0,0)")
print('Showing slide ' + str(self.current_slide) +'. Source: '+ slide_src)
#the calculations of angles for the pointing function should be done here, define them as self variables to access from present_slide
return
############################################################
############################################################
#
# PRESENTING SLIDE (ON CLICK SLIDE)
#
############################################################
############################################################
@htmlPy.Slot()
def present_slide(self):
self.aw.setTrackingMode("Head")
self.aw.setEngagementMode("Unengaged")
#this will use the dictionary to check for gestures FIX IT TO ONLY ADD ONE CLOSING TAG
        def check_gestures(text):
            # Replace each gesture keyword found in *text* with an annotated
            # speech string: set the face LEDs, speak the stripped text,
            # pause, then start and wait for the mapped animation tag.
            for key in self.gestures_dict:
                if text.find(key) != -1:
                    # text = text.replace(key, "^startTag(" + self.gestures_dict[key] + ")" ) + " ^stopTag(" + self.gestures_dict[key] + ")"
                    # Here we try to make the emotion at the end of the sentence.
                    # NOTE(review): not sure if the double \\ is needed as escape character.
                    text = '^call(ALLeds.fadeRGB("FaceLeds", "'+ self.leds[key] + '", 3)) ' + text.replace(key, "" ) + ' \\pau=1000\\ '+ ' ^startTag(' + self.gestures_dict[key] + ')' + ' ^waitTag(' + self.gestures_dict[key] + ')'
            # Returns the (possibly) annotated text after checking every keyword.
            return text
        def check_point(text):
            # Scan *text* for pointing keywords; strip each one found and
            # trigger the robot pointing motion for the mapped region.
            for key in self.pointing_dict:
                if text.find(key) != -1:
                    text = text.replace(key, "")
                    print('Pointing to ' + self.pointing_dict[key])
                    #COMMENT THIS WHEN TESTING OUTISDE ROBOT
                    ####################################################################
                    # self.aw.pauseAwareness()
                    ####################################################################
                    # Point to that position (side effect: moves the robot).
                    point(self.pointing_dict[key])
            # Return the text with all pointing keywords removed.
            return text
        def point(position):
            # Turn the body and raise the arm toward the given screen region.
            self.angles_vector = self.angles[position] #now we have the vector with the Z angle and the shoulder angle for a given position
            #COMMENT THIS WHEN TESTING OUTISDE ROBOT
            ####################################################################
            self.motion.setStiffnesses(self.shoulder, 1)
            # [0] = body Z rotation, [1] = shoulder pitch target (speed 0.3).
            self.motion.moveTo(0,0,self.angles_vector[0])
            self.motion.setAngles(self.shoulder, self.angles_vector[1],0.3)
            ####################################################################
            # Remember that the arm is raised so it can be reset later.
            self.isRobotPointing = True
            return
#with calls inside the text
################################################################################
# def point(position, text):
# self.angles_vector = self.angles[position] #now we have the vector with the Z angle and the shoulder angle for a given position
# my_z_str = str(self.angles_vector[0])
# my_shoulder_str = str(self.angles_vector[1])
# #^pcall() = asynchronous, ^call() = ^synchronous
# animated_text = '^call(ALMotion.moveTo(0,0,'+ my_z_str+')) ^call(ALMotion.setAngles("' + self.shoulder + '",' + my_shoulder_str+',0.3))' + text
# self.isRobotPointing = True
# return animated_text
#ORiginal code, the upper one is modified
################################################################################
# def check_point(text):
# for key in self.pointing_dict:
# if text.find(key) != -1:
# text = text.replace(key, "")
# print('Pointing to ' + self.pointing_dict[key])
# #point to that position
# point(self.pointing_dict[key])
# return text
# #the function to point should be added here also
# def point(position):
# self.angles_vector = self.angles[position] #now we have the vector with the Z angle and the shoulder angle for a given position
# #COMMENT THIS WHEN TESTING OUTISDE ROBOT
# ####################################################################
# self.motion.setStiffnesses(self.shoulder, 1)
# self.motion.moveTo(0,0,self.angles_vector[0])
# self.motion.setAngles(self.shoulder, self.angles_vector[1],0.5)
# ####################################################################
# self.isRobotPointing = True
# return
###########################################################################################
#the slide is showing, so when you click on it it will read the notes of the slide
#if it is not the last one it will show the next slide, if it is the last one will elapse some time and close the image view
slide = self.my_pres.slides[self.current_slide-1]
if slide.has_notes_slide:
notes_slide = slide.notes_slide
text_frame = notes_slide.notes_text_frame
for paragraph in text_frame.paragraphs:
if self.point_enabled:
after_pointing_txt = check_point(paragraph.text)
else:
after_pointing_txt = paragraph.text
modified_text = check_gestures(after_pointing_txt)
self.app.evaluate_javascript("document.getElementById('presentation_content').innerHTML += '<br> %s - %s '" %(paragraph.text, modified_text))
print('Notes line of slide ' + str(self.current_slide) +': ' + paragraph.text)
print('Modified notes line of slide ' + str(self.current_slide) +': ' + modified_text)
#COMMENT THIS WHEN T |
yright 2002-2018, Neo4j
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from unittest import TestCase
from cypy.graph import Node, relationship_type, Path
from cypy.encoding import cypher_repr, cypher_escape
KNOWS = relationship_type("KNOWS")
LOVES = relationship_type("LOVES")
HATES = relationship_type("HATES")
KNOWS_FR = relationship_type(u"CONNAÎT")
class CypherEscapeTestCase(TestCase):
    """Back-tick escaping rules for Cypher identifiers."""

    def test_can_write_simple_identifier(self):
        self.assertEqual(cypher_escape("foo"), "foo")

    def test_can_write_identifier_with_odd_chars(self):
        self.assertEqual(cypher_escape("foo bar"), "`foo bar`")

    def test_can_write_identifier_containing_back_ticks(self):
        self.assertEqual(cypher_escape("foo `bar`"), "`foo ``bar```")

    def test_cannot_write_empty_identifier(self):
        with self.assertRaises(ValueError):
            cypher_escape("")

    def test_cannot_write_none_identifier(self):
        with self.assertRaises(TypeError):
            cypher_escape(None)
class CypherNoneRepresentationTestCase(TestCase):
    """Cypher literal form of ``None``."""

    def test_should_encode_none(self):
        self.assertEqual(cypher_repr(None), u"null")
class CypherBooleanRepresentationTestCase(TestCase):
    """Cypher literal forms of booleans."""

    def test_should_encode_true(self):
        self.assertEqual(cypher_repr(True), u"true")

    def test_should_encode_false(self):
        self.assertEqual(cypher_repr(False), u"false")
class CypherIntegerRepresentationTestCase(TestCase):
    """Cypher literal forms of integers."""

    def test_should_encode_zero(self):
        self.assertEqual(cypher_repr(0), u"0")

    def test_should_encode_positive_integer(self):
        self.assertEqual(cypher_repr(123), u"123")

    def test_should_encode_negative_integer(self):
        self.assertEqual(cypher_repr(-123), u"-123")
class CypherFloatRepresentationTestCase(TestCase):
    """Cypher literal forms of floats."""

    def test_should_encode_zero(self):
        self.assertEqual(cypher_repr(0.0), u"0.0")

    def test_should_encode_positive_float(self):
        self.assertEqual(cypher_repr(123.456), u"123.456")

    def test_should_encode_negative_float(self):
        self.assertEqual(cypher_repr(-123.456), u"-123.456")
class CypherStringRepresentationTestCase(TestCase):
    """Quoting and escaping of string values in Cypher literals."""

    def test_should_encode_bytes(self):
        self.assertEqual(cypher_repr(b"hello, world"), u"'hello, world'")

    def test_should_encode_unicode(self):
        self.assertEqual(cypher_repr(u"hello, world"), u"'hello, world'")

    def test_should_encode_bytes_with_escaped_chars(self):
        self.assertEqual(cypher_repr(b"hello, 'world'", quote=u"'"),
                         u"'hello, \\'world\\''")

    def test_should_encode_unicode_with_escaped_chars(self):
        self.assertEqual(cypher_repr(u"hello, 'world'", quote=u"'"),
                         u"'hello, \\'world\\''")

    def test_should_encode_empty_string(self):
        self.assertEqual(cypher_repr(u""), u"''")

    def test_should_encode_bell(self):
        self.assertEqual(cypher_repr(u"\a"), u"'\\u0007'")

    def test_should_encode_backspace(self):
        self.assertEqual(cypher_repr(u"\b"), u"'\\b'")

    def test_should_encode_form_feed(self):
        self.assertEqual(cypher_repr(u"\f"), u"'\\f'")

    def test_should_encode_new_line(self):
        self.assertEqual(cypher_repr(u"\n"), u"'\\n'")

    def test_should_encode_carriage_return(self):
        self.assertEqual(cypher_repr(u"\r"), u"'\\r'")

    def test_should_encode_horizontal_tab(self):
        self.assertEqual(cypher_repr(u"\t"), u"'\\t'")

    def test_should_encode_double_quote_when_single_quoted(self):
        self.assertEqual(cypher_repr(u"\""), u"'\"'")

    def test_should_encode_single_quote_when_single_quoted(self):
        self.assertEqual(cypher_repr(u"'", quote=u"'"), u"'\\''")

    def test_should_encode_double_quote_when_double_quoted(self):
        self.assertEqual(cypher_repr(u"\"", quote=u"\""), u'"\\""')

    def test_should_encode_single_quote_when_double_quoted(self):
        self.assertEqual(cypher_repr(u"'", quote=u"\""), u'"\'"')

    def test_should_encode_2_byte_extended_character(self):
        self.assertEqual(cypher_repr(u"\xAB"), u"'\\u00ab'")

    def test_should_encode_4_byte_extended_character(self):
        self.assertEqual(cypher_repr(u"\uABCD"), u"'\\uabcd'")

    def test_should_encode_8_byte_extended_character(self):
        self.assertEqual(cypher_repr(u"\U0010ABCD"), u"'\\U0010abcd'")

    def test_should_encode_complex_sequence(self):
        # Mixed quotes force the encoder to fall back to double quoting.
        self.assertEqual(cypher_repr(u"' '' '''"), u"\"' '' '''\"")
class CypherListRepresentationTestCase(TestCase):
    """Cypher literal forms of lists."""

    def test_should_encode_list(self):
        self.assertEqual(cypher_repr([1, 2.0, u"three"]), u"[1, 2.0, 'three']")

    def test_should_encode_empty_list(self):
        self.assertEqual(cypher_repr([]), u"[]")
class CypherMapRepresentationTestCase(TestCase):
    """Cypher literal forms of maps (keys escaped where necessary)."""

    def test_should_encode_map(self):
        # OrderedDict pins the key order so the expected string is stable.
        source = OrderedDict([("one", 1), ("two", 2.0), ("number three", u"three")])
        self.assertEqual(cypher_repr(source),
                         u"{one: 1, two: 2.0, `number three`: 'three'}")

    def test_should_encode_empty_map(self):
        self.assertEqual(cypher_repr({}), u"{}")
class CypherNodeRepresentationTestCase(TestCase):
    """Cypher representation of nodes with labels and properties."""

    def test_should_encode_empty_node(self):
        subject = Node()
        self.assertEqual(cypher_repr(subject, node_template="{labels} {properties}"),
                         u"({})")

    def test_should_encode_node_with_property(self):
        subject = Node(name="Alice")
        self.assertEqual(cypher_repr(subject, node_template="{labels} {properties}"),
                         u"({name: 'Alice'})")

    def test_should_encode_node_with_label(self):
        subject = Node("Person")
        self.assertEqual(cypher_repr(subject, node_template="{labels} {properties}"),
                         u"(:Person {})")

    def test_should_encode_node_with_label_and_property(self):
        subject = Node("Person", name="Alice")
        self.assertEqual(cypher_repr(subject, node_template="{labels} {properties}"),
                         u"(:Person {name: 'Alice'})")
class CypherRelationshipRepresentationTestCase(TestCase):
def test_can_encode_relationship(self):
a = Node(name="Alice")
b = Node(name="Bob")
ab = KNOWS(a, b)
encoded = cypher_repr(ab, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:KNOWS {}]->(Bob)", encoded)
def test_can_encode_relationship_with_names(self):
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
ab = KNOWS(a, b)
encoded = cypher_repr(ab, related_node_template="{property.name}")
self.assertEqual("(Alice)-[:KNOWS {}]->(Bob)", encoded)
def test_can_encode_relationship_with_alternative_names(self):
a = Node("Person", nom=u"Aimée")
b = Node("Person", nom=u"Baptiste")
ab = KNOWS_FR(a, b)
encoded = cypher_repr(ab, related_node_template=u"{property.nom}")
self.assertEqual(u"(Aimée)-[:CONNAÎT {}]->(Baptiste)", encoded)
def test_can_encode_relationship_with_properties(self):
a = Node("Person", name="Alice")
b = Node("Person", name="Bob")
ab = KNOWS(a, b, since=1999)
encoded |
# This is a sample configuration file for an ISAPI filter and extension
# written in Python.
#
# Please see README.txt in this directory, and specifically the
# information about the "loader" DLL - installing this sample will create
# "_redirector_with_filter.dll" in the current directory. The readme explains
# this.
# Executing this script (or any server config script) will install the extension
# into your web server. As the server executes, the PyISAPI framework will load
# this module and create your Extension and Filter objects.
# This sample provides sample redirector:
# It is implemented by a filter and an extension, so that some requests can
# be ignored. Compare with 'redirector_simple' which avoids the filter, but
# is unable to selectively ignore certain requests.
# The process is sample uses is:
# * The filter is installed globally, as all filters are.
# * A Virtual Directory named "python" is setup. This dir has our ISAPI
# extension as the only application, mapped to file-extension '*'. Thus, our
# extension handles *all* requests in this directory.
# The basic process is that the filter does URL rewriting, redirecting every
# URL to our Virtual Directory. Our extension then handles this request,
# forwarding the data from the proxied site.
# For example:
# * URL of "index.html" comes in.
# * Filter rewrites this to "/python/index.html"
# * Our extension sees the full "/python/index.html", removes the leading
# portion, and opens and forwards the remote URL.
# This sample is very small - it avoid most error handling, etc. It is for
# demonstration purposes only.
from isapi import isapicon, threaded_extension
from isapi.simple import SimpleFilter
import sys
import traceback
import urllib.request, urllib.parse, urllib.error
# sys.isapidllhandle will exist when we are loaded by the IIS framework.
# In this case we redirect our output to the win32traceutil collector.
if hasattr(sys, "isapidllhandle"):
import win32traceutil
# The site we are proxying.
proxy = "http://www.python.org"
# The name of the virtual directory we install in, and redirect from.
virtualdir = "/python"
# The key feature of this redirector over the simple redirector is that it
# can choose to ignore certain responses by having the filter not rewrite them
# to our virtual dir. For this sample, we just exclude the IIS help directory.
# The ISAPI extension - handles requests in our virtual dir, and sends the
# response to the client.
class Extension(threaded_extension.ThreadPoolExtension):
    "Python sample Extension"
    def Dispatch(self, ecb):
        """Handle one request inside our virtual directory.

        Fetches the corresponding URL from the proxied site and streams
        the upstream headers and body back to the client.
        """
        # Note that our ThreadPoolExtension base class will catch exceptions
        # in our Dispatch method, and write the traceback to the client.
        # That is perfect for this sample, so we don't catch our own.
        #print 'IIS dispatching "%s"' % (ecb.GetServerVariable("URL"),)
        url = ecb.GetServerVariable("URL")
        if url.startswith(virtualdir):
            # Strip our virtual-dir prefix and forward to the proxied site.
            new_url = proxy + url[len(virtualdir):]
            print("Opening", new_url)
            # Use the response as a context manager so the upstream
            # connection is closed even if reading or writing fails
            # (the original code leaked the handle).
            with urllib.request.urlopen(new_url) as fp:
                headers = fp.info()
                ecb.SendResponseHeaders("200 OK", str(headers) + "\r\n", False)
                ecb.WriteClient(fp.read())
            ecb.DoneWithSession()
            print("Returned data from '%s'!" % (new_url,))
        else:
            # this should never happen - we should only see requests that
            # start with our virtual directory name.
            print("Not proxying '%s'" % (url,))
# The ISAPI filter.
class Filter(SimpleFilter):
    # NOTE: this docstring is read at runtime (Description=Filter.__doc__ in
    # the install section below), so its text is part of the configuration.
    "Sample Python Redirector"
    # Receive pre-processed header notifications at default filter priority.
    filter_flags = isapicon.SF_NOTIFY_PREPROC_HEADERS | \
                   isapicon.SF_NOTIFY_ORDER_DEFAULT
    def HttpFilterProc(self, fc):
        # Called by IIS for every notification we registered for; rewrites
        # any URL outside our virtual dir so the extension there handles it.
        #print "Filter Dispatch"
        nt = fc.NotificationType
        if nt != isapicon.SF_NOTIFY_PREPROC_HEADERS:
            # Not a notification we handle - let the next filter run.
            return isapicon.SF_STATUS_REQ_NEXT_NOTIFICATION
        pp = fc.GetData()
        url = pp.GetHeader("url")
        #print "URL is '%s'" % (url,)
        prefix = virtualdir
        if not url.startswith(prefix):
            # Prepend our virtual directory so the request is routed to the
            # proxying extension above.
            new_url = prefix + url
            print("New proxied URL is '%s'" % (new_url,))
            pp.SetHeader("url", new_url)
            # For the sake of demonstration, show how the FilterContext
            # attribute is used. It always starts out life as None, and
            # any assignments made are automatically decref'd by the
            # framework during a SF_NOTIFY_END_OF_NET_SESSION notification.
            if fc.FilterContext is None:
                fc.FilterContext = 0
            fc.FilterContext += 1
            print("This is request number %d on this connection" % fc.FilterContext)
            return isapicon.SF_STATUS_REQ_HANDLED_NOTIFICATION
        else:
            # URL already under our prefix - leave it alone.
            # NOTE(review): this branch returns None rather than an
            # SF_STATUS_* code - confirm the framework treats None as
            # "continue with default processing".
            print("Filter ignoring URL '%s'" % (url,))
        # Some older code that handled SF_NOTIFY_URL_MAP.
        #~ print "Have URL_MAP notify"
        #~ urlmap = fc.GetData()
        #~ print "URI is", urlmap.URL
        #~ print "Path is", urlmap.PhysicalPath
        #~ if urlmap.URL.startswith("/UC/"):
        #~     # Find the /UC/ in the physical path, and nuke it (except
        #~     # as the path is physical, it is \)
        #~     p = urlmap.PhysicalPath
        #~     pos = p.index("\\UC\\")
        #~     p = p[:pos] + p[pos+3:]
        #~     p = r"E:\src\pyisapi\webroot\PyTest\formTest.htm"
        #~     print "New path is", p
        #~     urlmap.PhysicalPath = p
# The entry points for the ISAPI extension.
def __FilterFactory__():
    """Entry point called by the PyISAPI framework to create the filter."""
    return Filter()
def __ExtensionFactory__():
    """Entry point called by the PyISAPI framework to create the extension."""
    return Extension()
if __name__=='__main__':
    # If run from the command-line, install ourselves.
    from isapi.install import *
    params = ISAPIParameters()
    # Setup all filters - these are global to the site.
    params.Filters = [
        FilterParameters(Name="PythonRedirector",
                         Description=Filter.__doc__),
    ]
    # Setup the virtual directories - this is a list of directories our
    # extension uses - in this case only 1.
    # Each extension has a "script map" - this is the mapping of ISAPI
    # extensions.
    sm = [
        ScriptMapParams(Extension="*", Flags=0)
    ]
    # Name drops the leading "/" from virtualdir ("/python" -> "python").
    vd = VirtualDirParameters(Name=virtualdir[1:],
                              Description = Extension.__doc__,
                              ScriptMaps = sm,
                              ScriptMapUpdate = "replace"
                              )
    params.VirtualDirs = [vd]
    # Parses the command line (install/remove) and applies the config to IIS.
    HandleCommandLine(params)
|
##############################################################################
#
# Copyright (C) 2018 Compassion CH (http://www.compassion.ch)
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from base64 import b64decode, b64encode
from werkzeug.datastructures import FileStorage
from odoo import models, fields, _
_logger = logging.getLogger(__name__)
try:
import magic
except ImportError:
_logger.warning("Please install magic in order to use Muskathlon module")
class OrderMaterialForm(models.AbstractModel):
    """Mixin CMS form letting event participants order promotional material.

    Submitting the form creates a ``crm.lead`` describing the ordered
    quantities and e-mails a notification to the responsible staff member.
    """
    _name = "cms.form.order.material.mixin"
    _inherit = "cms.form"
    _form_model = "crm.lead"
    _form_model_fields = ["partner_id", "description"]
    _form_required_fields = ["flyer_german", "flyer_french"]
    partner_id = fields.Many2one("res.partner", readonly=False)
    event_id = fields.Many2one("crm.event.compassion", readonly=False)
    form_id = fields.Char()
    # Allowed order quantities, rendered as selection options.
    flyers_select = [(i, str(i)) for i in (0, 5, 10, 15, 20, 30)]
    flyer_german = fields.Selection(flyers_select, string="Number of flyers in german", default=0)
    flyer_french = fields.Selection(flyers_select, string="Number of flyers in french", default=0)

    @property
    def _form_fieldsets(self):
        # Single fieldset with the two quantity selectors (form_id is hidden).
        return [
            {"id": "flyers", "fields": ["flyer_german", "flyer_french", "form_id"]},
        ]

    @property
    def form_msg_success_created(self):
        return _(
            "Thank you for your request. You will hear back from us "
            "within the next days."
        )

    @property
    def form_widgets(self):
        # Hide technical fields from the rendered form.
        res = super(OrderMaterialForm, self).form_widgets
        res.update(
            {
                "form_id": "cms_form_compassion.form.widget.hidden",
                "partner_id": "cms_form_compassion.form.widget.hidden",
                "event_id": "cms_form_compassion.form.widget.hidden",
                "description": "cms_form_compassion.form.widget.hidden",
            }
        )
        return res

    @staticmethod
    def create_description(material, values, languages=("french", "german")):
        """Build the HTML description of the ordered material.

        :param material: label of the ordered item (e.g. "flyer")
        :param values: form values holding the ``flyer_<lang>`` quantities
        :param languages: languages to report on. FIX: the default is now an
            immutable tuple - the previous mutable list default was shared
            between calls, a classic Python pitfall.
        """
        lines = []
        for lang in languages:
            if int(values[f'flyer_{lang}']) > 0:
                lines.append(f"<li>{values[f'flyer_{lang}']} <b>{material}</b> in {lang}</li>")
        description = f"<ul>{''.join(lines)}</ul>"
        return description

    def form_init(self, request, main_object=None, **kw):
        """Initialize the form, defaulting partner/event from the registration."""
        form = super(OrderMaterialForm, self).form_init(request, main_object, **kw)
        # Set default values
        registration = kw.get("registration")
        form.partner_id = registration and registration.partner_id
        form.event_id = registration and registration.compassion_event_id
        return form

    def form_before_create_or_update(self, values, extra_values):
        """ Dismiss any pending status message, to avoid multiple
        messages when multiple forms are present on same page.
        """
        super(OrderMaterialForm, self).form_before_create_or_update(
            values, extra_values
        )
        self.o_request.website.get_status_message()
        staff_id = (
            self.env["res.config.settings"]
            .sudo()
            .get_param("muskathlon_order_notify_id")
        )
        # Fill the lead values from the form data before creation.
        values.update(
            {
                "name": f"Muskathlon flyer order - {self.partner_id.name}",
                "description": self.create_description("flyer", extra_values),
                "user_id": staff_id,
                "event_ids": [(4, self.event_id.id, None)],
                "partner_id": self.partner_id.id,
            }
        )

    def form_check_empty_value(self, fname, field, value, **req_values):
        """Invalidate the form if they order 0 flyers"""
        is_valid = super().form_check_empty_value(fname, field, value, **req_values)
        # Mark invalid when both quantities sum to zero (nothing ordered).
        is_valid |= int(req_values["flyer_french"]) + int(req_values["flyer_german"]) <= 0
        return is_valid

    def _form_create(self, values):
        """ Run as Muskathlon user to authorize lead creation,
        and prevents default mail notification to staff
        (a better one is sent just after)."""
        uid = self.env.ref("muskathlon.user_muskathlon_portal").id
        self.main_object = self.form_model\
            .sudo(uid).with_context(tracking_disable=True).create(values.copy())

    def form_after_create_or_update(self, values, extra_values):
        """Complete the lead contact fields and notify staff by e-mail."""
        super(OrderMaterialForm, self).form_after_create_or_update(
            values, extra_values
        )
        # Update contact fields on lead
        self.main_object._onchange_partner_id()
        # Send mail
        email_template = self.env.ref("muskathlon.order_material_mail_template")
        email_template.sudo().send_mail(
            self.main_object.id,
            raise_exception=False,
            force_send=True,
            email_values={
                "attachments": [("picture.jpg", self.main_object.partner_id.image)],
                "email_to": self.main_object.user_email,
            },
        )
        return True
class OrderMaterialFormFlyer(models.AbstractModel):
    """Concrete flyer-order form built on the order-material mixin."""
    _name = "cms.form.order.material"
    _inherit = "cms.form.order.material.mixin"
    # Identifies this form variant in submitted data.
    form_id = fields.Char(default="order_material")
class OrderMaterialFormChildpack(models.AbstractModel):
    """Childpack-order form: same mixin, relabelled quantities and lead text."""
    _name = "cms.form.order.muskathlon.childpack"
    _inherit = "cms.form.order.material.mixin"
    # Identifies this form variant in submitted data.
    form_id = fields.Char(default="muskathlon_childpack")
    # Same underlying fields as the mixin, relabelled for childpacks.
    flyer_german = fields.Selection(string="Number of childpacks in german", default=0)
    flyer_french = fields.Selection(string="Number of childpacks in french", default=0)
    def form_before_create_or_update(self, values, extra_values):
        # Let the mixin populate the lead, then override name/description
        # with childpack-specific wording.
        super(OrderMaterialFormChildpack, self).form_before_create_or_update(
            values, extra_values
        )
        values.update(
            {
                "name": f"Muskathlon childpack order - {self.partner_id.name}",
                "description": self.create_description("childpack", extra_values),
            }
        )
|
    def build_from_db_model(db_model):
        """Abstract hook: build a model instance from its database counterpart.

        Every entity subclass must override this; the base implementation
        always raises.
        """
        raise NotImplementedError("Any entity subclass should implement the build_from_db_model method!")
class StudyModel(EntityModel):
    """Plain model mirroring a study database document."""

    @staticmethod
    def build_from_db_model(db_study):
        """Create a StudyModel populated from *db_study*."""
        study_model = StudyModel()
        study_model = StudyModel.copy_fields(db_study, study_model)
        return study_model

    @staticmethod
    def copy_fields(src_study, dest_study):
        """Copy the study attributes from *src_study* onto *dest_study*."""
        dest_study.accession_number = src_study.accession_number
        dest_study.study_type = src_study.study_type
        dest_study.study_title = src_study.study_title
        dest_study.faculty_sponsor = src_study.faculty_sponsor
        dest_study.ena_project_id = src_study.ena_project_id
        # BUG FIX: this previously copied src_study.ena_project_id into
        # study_visibility (copy-paste error), clobbering the visibility.
        dest_study.study_visibility = src_study.study_visibility
        dest_study.description = src_study.description
        dest_study.pi_list = src_study.pi_list
        return dest_study

    def get_entity_identifying_field(self):
        """Return the best identifier available: name, then internal_id, else None."""
        if self.name:
            return str(self.name)
        elif self.internal_id:
            # NOTE(review): internal_id is returned unconverted while name is
            # str()-ed - confirm whether callers rely on the raw type here.
            return self.internal_id
        return None
class AbstractLibraryModel(SerapisModel):
    """Model holding the library attributes shared by all library types."""

    @staticmethod
    def build_from_db_model(db_alib):
        """Create an AbstractLibraryModel mirroring *db_alib*."""
        model = AbstractLibraryModel()
        return AbstractLibraryModel.copy_fields(db_alib, model)

    @staticmethod
    def copy_fields(src_alib, dest_alib):
        """Transfer the shared library attributes and return the destination."""
        for attr in ("coverage", "library_source", "library_strategy",
                     "instrument_model"):
            setattr(dest_alib, attr, getattr(src_alib, attr))
        return dest_alib
class LibraryModel(AbstractLibraryModel, EntityModel):
    # NOTE(review): build_from_db_model uses only LibraryModel.copy_fields, so
    # the inherited AbstractLibraryModel attributes (coverage, library_source,
    # ...) are NOT copied here - confirm whether that is intentional.
    @staticmethod
    def build_from_db_model(db_lib):
        """Create a LibraryModel populated from *db_lib*."""
        lib_model = LibraryModel()
        lib_model = LibraryModel.copy_fields(db_lib,lib_model)
        return lib_model
    @staticmethod
    def copy_fields(src_lib, dest_lib):
        """Copy the library-specific attributes onto *dest_lib* and return it."""
        dest_lib.library_type = src_lib.library_type
        dest_lib.public_name = src_lib.public_name
        dest_lib.sample_internal_id = src_lib.sample_internal_id
        return dest_lib
class ReferenceGenomeModel(SerapisModel):
    """Model describing a reference genome: its name and file paths."""

    @staticmethod
    def build_from_db_model(db_reference_genome):
        """Create a ReferenceGenomeModel mirroring the given db model."""
        model = ReferenceGenomeModel()
        #ref_model.md5 = db_reference_genome.id
        return ReferenceGenomeModel.copy_fields(db_reference_genome, model)

    @staticmethod
    def copy_fields(old_ref, new_ref):
        """Copy the reference-genome attributes and return *new_ref*."""
        #new_ref.md5 = old_ref.md5
        for attr in ("paths", "name"):
            setattr(new_ref, attr, getattr(old_ref, attr))
        return new_ref
class SampleModel(EntityModel):
    """Model mirroring a sample database document."""

    @staticmethod
    def build_from_db_model(db_sample):
        """Create a SampleModel populated from *db_sample*."""
        model = SampleModel()
        return SampleModel.copy_fields(db_sample, model)

    @staticmethod
    def copy_fields(old_sample, new_sample):
        """Transfer every sample attribute and return *new_sample*."""
        for attr in (
            "accession_number", "sanger_sample_id", "public_name",
            "sample_tissue_type", "reference_genome", "taxon_id",
            "gender", "cohort", "ethnicity", "country_of_origin",
            "geographical_region", "organism", "common_name",
        ):
            setattr(new_sample, attr, getattr(old_sample, attr))
        return new_sample
class IndexFileModel(SerapisModel):
    """Model describing an index file attached to a submitted file."""

    @staticmethod
    def build_from_db_model(db_index):
        """Create an IndexFileModel mirroring *db_index*."""
        model = IndexFileModel()
        return IndexFileModel.copy_fields(db_index, model)

    @staticmethod
    def copy_fields(src_index, dest_index):
        """Copy the index-file attributes onto *dest_index* and return it."""
        for attr in ("dest_path", "file_path_client", "md5"):
            setattr(dest_index, attr, getattr(src_index, attr))
        return dest_index
class SubmittedFileModel(SerapisModel):
    """Model for a submitted file, including its nested entity models."""

    @staticmethod
    def build_from_db_model(db_file):
        ''' Receives a database model as parameter and extracts from it
            the information needed to create this (model) object.
        '''
        file_model = SubmittedFileModel()
        file_model = SubmittedFileModel.copy_fields(db_file, file_model)
        ref_genome = data_access.ReferenceGenomeDataAccess.retrieve_reference_by_id(db_file.file_reference_genome_id)
        file_model.reference_genome = ReferenceGenomeModel.build_from_db_model(ref_genome)
        # FIX: previously copy_fields ran a second time here, re-copying the
        # exact same values onto the same object; one pass is sufficient.
        return file_model

    @staticmethod
    def copy_fields(src_file, dest_file):
        """Copy scalar attributes and rebuild the nested models; return *dest_file*."""
        dest_file.file_id = src_file.file_id
        dest_file.file_type = src_file.file_type
        dest_file.file_path_client = src_file.file_path_client
        dest_file.dest_path = src_file.dest_path
        dest_file.md5 = src_file.md5
        dest_file.data_type = src_file.data_type
        dest_file.data_subtype_tags = src_file.data_subtype_tags
        dest_file.access_group = src_file.access_group
        dest_file.security_level = src_file.security_level
        dest_file.pmid_list = src_file.pmid_list
        # Nested: each db sub-document is converted to its model counterpart.
        dest_file.study_list = [ StudyModel.build_from_db_model(a) for a in src_file.study_list]
        dest_file.library_list = [ LibraryModel.build_from_db_model(a) for a in src_file.library_list]
        dest_file.entity_set = [SampleModel.build_from_db_model(a) for a in src_file.entity_set]
        dest_file.abstract_library = AbstractLibraryModel.build_from_db_model(src_file.abstract_library)
        dest_file.index_file = IndexFileModel.build_from_db_model(src_file.index_file)
        return dest_file
class BAMFileModel(SubmittedFileModel):
    @staticmethod
    def build_from_db_model(db_bamfile):
        """Create a BAMFileModel from *db_bamfile*.

        Copies the BAM-specific fields first, then applies the base-class
        copy for the common file fields.
        """
        bamfile_model = BAMFileModel()
        bamfile_model = BAMFileModel.copy_fields(db_bamfile, bamfile_model)
        return SubmittedFileModel.copy_fields(db_bamfile, bamfile_model)
    @staticmethod
    def copy_fields(src_file, dest_file):
        """Copy only the BAM-specific attributes onto *dest_file* and return it."""
        dest_file.seq_centers = src_file.seq_centers
        dest_file.run_list = src_file.run_list
        dest_file.platform_list = src_file.platform_list
        dest_file.seq_date_list = src_file.seq_date_list
        dest_file.library_well_list = src_file.library_well_list
        dest_file.multiplex_lib_list = src_file.multiplex_lib_list
        return dest_file
class VCFFileModel(SubmittedFileModel):
    @staticmethod
    def build_from_db_model(db_vcffile):
        """Create a VCFFileModel from *db_vcffile*.

        Copies the VCF-specific fields first, then applies the base-class
        copy for the common file fields.
        """
        vcf_model = VCFFileModel()
        vcf_model = VCFFileModel.copy_fields(db_vcffile, vcf_model)
        return SubmittedFileModel.copy_fields(db_vcffile, vcf_model)
    @staticmethod
    def copy_fields(src_file, dest_file):
        """Copy only the VCF-specific attributes onto *dest_file* and return it."""
        dest_file.file_format = src_file.file_format
        dest_file.used_samtools = src_file.used_samtools
        dest_file.used_unified_genotyper = src_file.used_unified_genotyper
        return dest_file
class Submission(SerapisModel):
@staticmethod
def build_from_db_model(db_submission):
submission_model = Submission()
submission_model = Submission.copy_fields(db_submission, submission_model)
files = data_access.SubmissionDataAccess.retrieve_all_files_for_submission(db_submission.id)
submission_model.files_list = [f.file_id for f in files]
return submission_model
@staticmethod
def copy_fields(src_subm, dest_subm):
dest_subm.sanger_user_id = src_subm.sanger_user_id
dest_subm.access_group = src_subm.access_group
dest_subm.submission_date = src_subm.submission_date
dest_subm.file_type = src_subm.file_type
dest_subm.irods_coll |
DO_PROFILE)
def splitIterator(text, size):
    """Yield successive slices of *text*, each at most *size* items long.

    Works on any sliceable sequence; the final chunk may be shorter when
    len(text) is not a multiple of size.
    """
    for offset in range(0, len(text), size):
        chunk = text[offset:offset + size]
        yield chunk
prev_sum = 0
MMAP_NO_DATA_INDICATE_ZERO = False
MMAP_NO_DATA_INDICATE_NON_ZERO = True
@do_profile(DO_PROFILE)
def get_data_from_mmap():
    """Drain all newly transferred buffers from the memory-mapped files.

    Returns a nested list ``data[mmap_file_index][buffer_index]`` where each
    entry holds NBR_DATA_POINTS_PER_BUFFER_INT doubles unpacked from the
    corresponding mmap file.  When no new buffers are pending (counter is
    0/-1, or every file still starts with the ACK sentinel) an empty dummy
    buffer from create_empty_data_buffer() is returned instead.

    After a successful read the function acknowledges receipt by overwriting
    the first double of every mmap file with DATA_RECEIVED_ACK_STR and
    zeroing the 'number of buffers received' counter, which tells the sender
    it may write new data.

    NOTE(review): relies on module globals (mmap_data, mmap_stats and the
    MMAP_*/NBR_*/DATA_* constants) and assumes the writer packs values as C
    doubles compatible with struct.unpack('d', ...) — confirm both ends use
    the same native byte order.
    """
    #
    #t0 = time()
    nbr_buffers_received = get_nbr_received_buffers_from_mmap()
    nbr_mmap_files = len(mmap_data)
    zeros = np.zeros(NBR_DATA_POINTS_PER_BUFFER_INT)
    ''' no new buffers - generate one empty dummy buffer and return '''
    if nbr_buffers_received == 0 or nbr_buffers_received == -1:
        return create_empty_data_buffer(nbr_mmap_files, zeros)
    nbr_buffers_received = int(nbr_buffers_received)
    nbr_elements = nbr_buffers_received * NBR_DATA_POINTS_PER_BUFFER_INT
    range_nbr_mmap_files = range(nbr_mmap_files)
    # check if there's any data that's ready for pickup.
    new_data_found = np.zeros(nbr_mmap_files)
    for mmap_file_index in range_nbr_mmap_files:
        # go to beginning of memory mapped area
        mmap_data[mmap_file_index].seek(0)
        # quit right away if no new data has been written yet.
        # (a file whose first double still equals the ACK sentinel has not
        # been rewritten by the sender since our last read)
        this_element = mmap_data[mmap_file_index].read(MMAP_BYTES_PER_FLOAT)
        this_element = unpack('d', this_element)[0]
        if round(this_element, 8) != DATA_RECEIVED_ACK_NUM:
            new_data_found[mmap_file_index] = 1
    # none of the files contain new data
    if sum(new_data_found) == 0:
        return create_empty_data_buffer(nbr_mmap_files, zeros, nbr_buffers_received)
    ''' read out transferred data '''
    data = []
    # this is ~ 10ms slower.
    #data = np.zeros((nbr_mmap_files, nbr_buffers_received, NBR_DATA_POINTS_PER_BUFFER_INT))
    # at least one new buffer has arrived.
    for mmap_file_index in range_nbr_mmap_files:
        #'''
        # pre-allocate each buffer
        buffers = []
        for buffer_index in xrange(nbr_buffers_received):
            # DONE: find out what the problem here is:
            # there seems to be a bug in python on windows, or I don't understand the way things work:
            # if I create 'zeros' outside this loop, the second time that 'zeros' gets called,
            # it will contain all values found in data[mmap_file_index][buffer][j]. Therefore I have to re-generate
            # the 'zeros' for each mmap_file_index'th loop.
            # SOLUTION:
            # We need to make a 'deep-copy' of zeros, otherwise we are just
            # passing a reference to the same object (which is a np.array object).
            zero_copy = zeros.copy()
            buffers.append(zero_copy)
        # add all buffers to mmap_file_index'th data stream.
        data.append(buffers)
        #'''
        # go to beginning of memory mapped area & read out all elements
        mmap_data[mmap_file_index].seek(0)
        all_values_string = mmap_data[mmap_file_index].read(nbr_elements * MMAP_BYTES_PER_FLOAT)
        # 0.1632 per call in debugger
        # grab sub-list so we avoid having to call this list by its index.
        this_data = data[mmap_file_index]
        # unpack all values at once
        unpacked_values = unpack("d" * nbr_elements, all_values_string)
        # using list comprehension is better than a regular loop with random array access
        # NOTE(review): this rebinds this_data, discarding the pre-allocated
        # zero buffers built above; each entry becomes a tuple slice of
        # unpacked_values rather than an np.array.
        this_data = [unpacked_values[i:i+NBR_DATA_POINTS_PER_BUFFER_INT] for i in xrange(0, nbr_elements, NBR_DATA_POINTS_PER_BUFFER_INT)]
        # slower version of above line.
        #for abs_idx in range(nbr_elements):
        #    this_data[abs_idx / NBR_DATA_POINTS_PER_BUFFER_INT][abs_idx % NBR_DATA_POINTS_PER_BUFFER_INT] = unpacked_values[abs_idx]
        # write-back sub-list
        data[mmap_file_index] = this_data
        ''' original version.
        # these next few lines are responsible for 90% of the time spent in this function.
        # 0.4974s per call in debugger
        element_values_list = list(splitIterator(all_values_string, MMAP_BYTES_PER_FLOAT))
        for abs_element_index in range(nbr_elements):
            this_element = element_values_list[abs_element_index]
            this_element = unpack('d', this_element)[0]
            buffer_nbr = abs_element_index / NBR_DATA_POINTS_PER_BUFFER_INT
            index_in_buffer = abs_element_index % NBR_DATA_POINTS_PER_BUFFER_INT
            data[mmap_file_index][buffer_nbr][index_in_buffer] = this_element
        '''
        ''' useless alternatives
        # even worse: -> ~ 0.0063 secs per call
        unpacked_values = [unpack('d', element_values_list[j])[0] for j in range(nbr_elements)]
        # worst: ~0.0160 secs per call
        buffer_ids = np.arange(nbr_elements) / NBR_DATA_POINTS_PER_BUFFER_INT
        index_in_buffer_id = np.arange(nbr_elements) % NBR_DATA_POINTS_PER_BUFFER_INT
        for abs_element_index in range(nbr_elements):
            data[mmap_file_index][buffer_ids[abs_element_index]][index_in_buffer_id[abs_element_index]] = unpacked_values[abs_element_index]
        '''
    #t1 = time()
    #print 'get_data_from_mmap() takes %f seconds' %(t1-t0)
    # go to beginning of memory mapped area and overwrite first value with
    # ACK string so that the sender knows that it is safe to overwrite the
    # previous data (== send new data).
    for mmap_file_index in range_nbr_mmap_files:
        mmap_data[mmap_file_index].seek(0)
        mmap_data[mmap_file_index].write(DATA_RECEIVED_ACK_STR)
    # overwrite the 'number of buffers received' field with zero, so that we don't
    # keep reading in this very same data.
    mmap_stats_go_to_nbr_received_buffers_pos()
    mmap_stats.write(NBR_BUFFERS_ZERO_STR)
    return data
@do_profile(DO_PROFILE)
def update_vbo_with_data_from_plot_queue():
    """Pull NBR_BUFFERS_TO_UPDATE buffers off the plot queue and upload them
    into the current VBO of every panel.

    For each buffer: converts the raw data to GPU vertex format, computes
    the byte offset at which to overwrite the active VBO, uploads one line
    segment per panel, then advances the module-level counters (c_vbo,
    x-shifts, pointer_shift).  When the active VBO is full the whole VBO
    ring is rotated via rotate_vbos_clear_last_vbo().

    Mutates module globals only; returns None.
    """
    global x_shift_current, x_shift_single_buffer_current
    global pointer_shift
    global vbos, colors
    global c_vbo # counter needed for VBO positioning
    global pointer_offset, nbr_points_rendered_in_last_vbo
    for j in xrange(NBR_BUFFERS_TO_UPDATE):
        # grab 'raw_data' from beginning of plot queue.
        raw_data = get_data_from_plot_queue()
        data = transform_vector_of_buffers_to_GPU_format(raw_data, x_shift_single_buffer_current)
        ### VBO POSITIONING
        # NOTE(review): Python-2 integer division — assumes
        # NBR_DATA_POINTS_PER_VBO is an exact multiple of NBR_DATA_POINTS_PER_BUFFER.
        pos_to_overwrite = c_vbo % (NBR_DATA_POINTS_PER_VBO / NBR_DATA_POINTS_PER_BUFFER)
        nbr_points_rendered_in_last_vbo = int(NBR_DATA_POINTS_PER_BUFFER * pos_to_overwrite)
        # at which location in the memory (in bytes) of the VBO should we replace the data?
        # also needed for plotting.
        pointer_offset = nbr_points_rendered_in_last_vbo * BYTES_PER_POINT
        nbr_data_streams = len(data)
        # panels beyond the number of data streams reuse streams round-robin.
        for panel in range(NBR_PANELS):
            update_line_segment_on_GPU(vbos[panel][-1], pointer_offset, data[panel % nbr_data_streams])
        c_vbo += 1
        x_shift_single_buffer_current += SHIFT_X_SINGLE_BUFFER
        pointer_shift += NBR_DATA_POINTS_PER_BUFFER
        # check whether we reached the end of the VBO and thus need to rotate it.
        if pointer_shift == NBR_DATA_POINTS_PER_VBO:
            pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo = rotate_vbos_clear_last_vbo(pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo)
@do_profile(DO_PROFILE)
def rotate_vbos_clear_last_vbo(pointer_shift, pointer_offset, x_shift_current, vbos, colors, c_vbo):
# reset pointer offsets / shifts
# TODO: clean up and clarify 'pointer_shift' vs 'pointer_offset'!
pointer_shift = 0
pointer_offset = 0
c_vbo = 0
x_shift_current += SHIFT_X_BY
''' this is not fast enough and will lead to jitter effects
# generate new data set for each panel
tmp_points = [ [None] for j in range(NBR_PANELS)]
for panel in range(NBR_PANELS):
tmp_points_panel = generate_line_segment_zeros(x_shift=x_shift_current)
tmp_points[panel] = transform_line_points_to_data_format_for_GPU(tmp_points_panel)
'''
for panel in range(NBR_PANELS):
this_vbo = vbos[panel][0]
this_color = colors[panel][0]
# Delete current vbo and replace with new one.
# We could just re-use the current vbo, however this might lead to 'blinking' artifacts
# with th |
# -*- coding: utf-8 -*-
import urllib
from oauth2 import Consumer
from webob import Request, Response
from wsgioauth import calls
from wsgioauth.provider import Application, Storage
from wsgioauth.utils import CALLS
from wsgioauth.provider import Storage, Token
# Dispatch table mapping management-call names to their handler functions in
# wsgioauth.calls; merged into the global CALLS registry by app_factory below.
ROUTES = {
    u'getConsumers': calls.getConsumers,
    u'getRequestTokens': calls.getRequestTokens,
    u'getAccessTokens': calls.getAccessTokens,
    u'provisionConsumer': calls.provisionConsumer,
    u'provisionRequestToken': calls.provisionRequestToken,
    u'provisionAccessToken': calls.provisionAccessToken,
    u'deleteConsumer': calls.deleteConsumer,
    u'deleteRequestToken': calls.deleteRequestToken,
    u'deleteAccessToken': calls.deleteAccessToken,
}
def getMockStorage():
    """Swap the mock OAuth classes into the provider registry and return the
    Storage class (not an instance)."""
    from wsgioauth.provider import OAUTH_CLASSES
    # The mock consumer comes from python-oauth2; the request token reuses
    # the provider's own Token class.
    replacements = (('consumer', Consumer), ('request_token', Token))
    for class_key, mock_class in replacements:
        OAUTH_CLASSES[class_key] = mock_class
    return Storage
def echo_app(environ, start_response):
    """Simple WSGI app that echoes the request parameters back, urlencoded."""
    request = Request(environ)
    echoed_body = urllib.urlencode(request.params)
    response = Response(echoed_body)
    return response(environ, start_response)
def echo_app_factory(*global_conf, **local_conf):
    """App-factory entry point returning the module-level echo_app; all
    configuration arguments are ignored (the echo app is stateless)."""
    return echo_app
# Module-wide storage singleton shared by app_factory and filter_factory.
STORAGE = None


def app_factory(*global_conf, **local_conf):
    """Build the wsgioauth provider Application.

    Registers the mock ROUTES in the global CALLS table, lazily creates the
    shared mock STORAGE singleton from *local_conf*, and hands the
    Application a lookup closure that always returns that singleton.
    """
    global STORAGE
    CALLS.update(ROUTES)
    if STORAGE is None:
        STORAGE = getMockStorage()(local_conf)

    def storage_lookup(environ, conf):
        # Every request shares the single mock storage instance.
        return STORAGE

    return Application(storage_lookup, **local_conf)
def filter_factory(app, *global_conf, **local_conf):
    """Return a wsgioauth.provider.Middleware wrapping *app*.

    Lazily creates the shared mock STORAGE singleton from *local_conf* and
    supplies the middleware with a lookup closure returning it.
    """
    from wsgioauth.mock import getMockStorage
    global STORAGE
    if STORAGE is None:
        STORAGE = getMockStorage()(local_conf)

    def storage_lookup(environ, conf):
        # Every request shares the single mock storage instance.
        return STORAGE

    from wsgioauth.provider import Middleware
    return Middleware(app, storage_lookup, **local_conf)
|
import uuid
import xbmc
import xbmcaddon
addon = xbmcaddon.Addon()
# Add-on configuration read once at import time into a plain dict.
settings = {
    'debug': addon.getSetting('debug') == "true",
    'powersave_minutes': int(addon.getSetting('powersave_minutes')),
    'version': addon.getAddonInfo('version'),
    # Reuse the persisted uuid; generate a fresh one when the stored
    # setting is empty.
    'uuid': str(addon.getSetting('uuid')) or str(uuid.uuid4()),
}
# Persist the uuid so subsequent runs keep the same identifier.
addon.setSetting('uuid', settings['uuid'])
|
ll_table[1044]="eventfd"
syscall_table[19]="eventfd2"
syscall_table[221]="execve"
syscall_table[93]="exit"
syscall_table[94]="exit_group"
syscall_table[48]="faccessat"
syscall_table[223]="fadvise64"
syscall_table[47]="fallocate"
syscall_table[262]="fanotify_init"
syscall_table[263]="fanotify_mark"
syscall_table[50]="fchdir"
syscall_table[52]="fchmod"
syscall_table[53]="fchmodat"
syscall_table[55]="fchown"
syscall_table[54]="fchownat"
syscall_table[25]="fcntl"
syscall_table[1052]="fcntl64"
syscall_table[83]="fdatasync"
syscall_table[10]="fgetxattr"
syscall_table[13]="flistxattr"
syscall_table[32]="flock"
syscall_table[1079]="fork"
syscall_table[16]="fremovexattr"
syscall_table[7]="fsetxattr"
syscall_table[80]="fstat"
syscall_table[1051]="fstat64"
syscall_table[79]="fstatat64"
syscall_table[44]="fstatfs"
syscall_table[1055]="fstatfs64"
syscall_table[82]="fsync"
syscall_table[46]="ftruncate"
syscall_table[1047]="ftr | uncate64"
syscall_table[98]="futex"
syscall_table[1066]="futimesat"
syscall_table[168]="getcpu"
syscall_table[17]="getcwd"
syscall_table[1065]="getdents"
syscall_table[61]="getdents64"
syscall_table[177]="getegid"
syscall_table[175]="geteuid"
syscall_table[176]="getgid"
syscall_table[158]="getgroups"
syscall_table[102]="getitimer"
syscall_table[236]="get_mempolicy"
syscall_table[205]="getpeername"
sy | scall_table[155]="getpgid"
syscall_table[1060]="getpgrp"
syscall_table[172]="getpid"
syscall_table[173]="getppid"
syscall_table[141]="getpriority"
syscall_table[150]="getresgid"
syscall_table[148]="getresuid"
syscall_table[163]="getrlimit"
syscall_table[100]="get_robust_list"
syscall_table[165]="getrusage"
syscall_table[156]="getsid"
syscall_table[204]="getsockname"
syscall_table[209]="getsockopt"
syscall_table[178]="gettid"
syscall_table[169]="gettimeofday"
syscall_table[174]="getuid"
syscall_table[8]="getxattr"
syscall_table[105]="init_module"
syscall_table[27]="inotify_add_watch"
syscall_table[1043]="inotify_init"
syscall_table[26]="inotify_init1"
syscall_table[28]="inotify_rm_watch"
syscall_table[3]="io_cancel"
syscall_table[29]="ioctl"
syscall_table[1]="io_destroy"
syscall_table[4]="io_getevents"
syscall_table[31]="ioprio_get"
syscall_table[30]="ioprio_set"
syscall_table[0]="io_setup"
syscall_table[2]="io_submit"
syscall_table[104]="kexec_load"
syscall_table[219]="keyctl"
syscall_table[129]="kill"
syscall_table[1032]="lchown"
syscall_table[9]="lgetxattr"
syscall_table[1025]="link"
syscall_table[37]="linkat"
syscall_table[201]="listen"
syscall_table[11]="listxattr"
syscall_table[12]="llistxattr"
syscall_table[18]="lookup_dcookie"
syscall_table[15]="lremovexattr"
syscall_table[62]="lseek"
syscall_table[6]="lsetxattr"
syscall_table[1039]="lstat"
syscall_table[1050]="lstat64"
syscall_table[233]="madvise"
syscall_table[235]="mbind"
syscall_table[238]="migrate_pages"
syscall_table[232]="mincore"
syscall_table[1030]="mkdir"
syscall_table[34]="mkdirat"
syscall_table[1027]="mknod"
syscall_table[33]="mknodat"
syscall_table[228]="mlock"
syscall_table[230]="mlockall"
syscall_table[222]="mmap"
syscall_table[40]="mount"
syscall_table[239]="move_pages"
syscall_table[226]="mprotect"
syscall_table[185]="mq_getsetattr"
syscall_table[184]="mq_notify"
syscall_table[180]="mq_open"
syscall_table[183]="mq_timedreceive"
syscall_table[182]="mq_timedsend"
syscall_table[181]="mq_unlink"
syscall_table[216]="mremap"
syscall_table[187]="msgctl"
syscall_table[186]="msgget"
syscall_table[188]="msgrcv"
syscall_table[189]="msgsnd"
syscall_table[227]="msync"
syscall_table[229]="munlock"
syscall_table[231]="munlockall"
syscall_table[215]="munmap"
syscall_table[101]="nanosleep"
syscall_table[1054]="newfstatat"
syscall_table[42]="nfsservctl"
syscall_table[1024]="open"
syscall_table[56]="openat"
syscall_table[1061]="pause"
syscall_table[241]="perf_event_open"
syscall_table[92]="personality"
syscall_table[1040]="pipe"
syscall_table[59]="pipe2"
syscall_table[41]="pivot_root"
syscall_table[1068]="poll"
syscall_table[73]="ppoll"
syscall_table[167]="prctl"
syscall_table[67]="pread64"
syscall_table[69]="preadv"
syscall_table[261]="prlimit64"
syscall_table[72]="pselect6"
syscall_table[117]="ptrace"
syscall_table[68]="pwrite64"
syscall_table[70]="pwritev"
syscall_table[60]="quotactl"
syscall_table[63]="read"
syscall_table[213]="readahead"
syscall_table[1035]="readlink"
syscall_table[78]="readlinkat"
syscall_table[65]="readv"
syscall_table[142]="reboot"
syscall_table[1073]="recv"
syscall_table[207]="recvfrom"
syscall_table[243]="recvmmsg"
syscall_table[212]="recvmsg"
syscall_table[234]="remap_file_pages"
syscall_table[14]="removexattr"
syscall_table[1034]="rename"
syscall_table[38]="renameat"
syscall_table[218]="request_key"
syscall_table[128]="restart_syscall"
syscall_table[1031]="rmdir"
syscall_table[134]="rt_sigaction"
syscall_table[136]="rt_sigpending"
syscall_table[135]="rt_sigprocmask"
syscall_table[138]="rt_sigqueueinfo"
syscall_table[139]="rt_sigreturn"
syscall_table[133]="rt_sigsuspend"
syscall_table[137]="rt_sigtimedwait"
syscall_table[240]="rt_tgsigqueueinfo"
syscall_table[123]="sched_getaffinity"
syscall_table[121]="sched_getparam"
syscall_table[125]="sched_get_priority_max"
syscall_table[126]="sched_get_priority_min"
syscall_table[120]="sched_getscheduler"
syscall_table[127]="sched_rr_get_interval"
syscall_table[122]="sched_setaffinity"
syscall_table[118]="sched_setparam"
syscall_table[119]="sched_setscheduler"
syscall_table[124]="sched_yield"
syscall_table[1067]="select"
syscall_table[191]="semctl"
syscall_table[190]="semget"
syscall_table[193]="semop"
syscall_table[192]="semtimedop"
syscall_table[1074]="send"
syscall_table[71]="sendfile"
syscall_table[1046]="sendfile64"
syscall_table[211]="sendmsg"
syscall_table[206]="sendto"
syscall_table[162]="setdomainname"
syscall_table[152]="setfsgid"
syscall_table[151]="setfsuid"
syscall_table[144]="setgid"
syscall_table[159]="setgroups"
syscall_table[161]="sethostname"
syscall_table[103]="setitimer"
syscall_table[237]="set_mempolicy"
syscall_table[154]="setpgid"
syscall_table[140]="setpriority"
syscall_table[143]="setregid"
syscall_table[149]="setresgid"
syscall_table[147]="setresuid"
syscall_table[145]="setreuid"
syscall_table[164]="setrlimit"
syscall_table[99]="set_robust_list"
syscall_table[157]="setsid"
syscall_table[208]="setsockopt"
syscall_table[96]="set_tid_address"
syscall_table[170]="settimeofday"
syscall_table[146]="setuid"
syscall_table[5]="setxattr"
syscall_table[196]="shmat"
syscall_table[195]="shmctl"
syscall_table[197]="shmdt"
syscall_table[194]="shmget"
syscall_table[210]="shutdown"
syscall_table[132]="sigaltstack"
syscall_table[1045]="signalfd"
syscall_table[74]="signalfd4"
syscall_table[1999]="sigreturn"
syscall_table[198]="socket"
syscall_table[199]="socketpair"
syscall_table[76]="splice"
syscall_table[1038]="stat"
syscall_table[1049]="stat64"
syscall_table[43]="statfs"
syscall_table[1056]="statfs64"
syscall_table[225]="swapoff"
syscall_table[224]="swapon"
syscall_table[1036]="symlink"
syscall_table[36]="symlinkat"
syscall_table[81]="sync"
syscall_table[84]="sync_file_range2"
syscall_table[1078]="_sysctl"
syscall_table[179]="sysinfo"
syscall_table[116]="syslog"
syscall_table[77]="tee"
syscall_table[131]="tgkill"
syscall_table[1062]="time"
syscall_table[107]="timer_create"
syscall_table[111]="timer_delete"
syscall_table[85]="timerfd_create"
syscall_table[87]="timerfd_gettime"
syscall_table[86]="timerfd_settime"
syscall_table[109]="timer_getoverrun"
syscall_table[108]="timer_gettime"
syscall_table[110]="timer_settime"
syscall_table[153]="times"
syscall_table[130]="tkill"
syscall_table[45]="truncate"
syscall_table[1048]="truncate64"
syscall_table[166]="umask"
syscall_table[1076]="umount"
syscall_table[39]="umount2"
syscall_table[160]="uname"
syscall_table[1026]="unlink"
syscall_table[35]="unlinkat"
syscall_table[97]="unshare"
syscall_table[1077]="uselib"
syscall_table[1070]="ustat"
syscall_table[1063]="utime"
syscall_table[88]="utimensat"
syscall_table[1037]="utimes"
syscall_table[1071]="vfork"
syscall_table[58]="vhangup"
syscall_table[75]="vmsplice"
syscall_table[260]="wait4"
syscall_table[95]="waitid"
syscall_table[64]="write"
syscall_table[66]="writev"
def get(no):
return syscall_tabl |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.